From 23031e7e0cbd8844aa489be97fa02446a45f8a03 Mon Sep 17 00:00:00 2001 From: "Anna V. Kozlova" Date: Thu, 28 Dec 2017 13:56:17 +0300 Subject: [PATCH] Added week3 assignment --- week3/grader.py | 67 +++ week3/util.py | 18 + week3/week3-Embeddings.ipynb | 1047 ++++++++++++++++++++++++++++++++++ 3 files changed, 1132 insertions(+) create mode 100644 week3/grader.py create mode 100644 week3/util.py create mode 100644 week3/week3-Embeddings.ipynb diff --git a/week3/grader.py b/week3/grader.py new file mode 100644 index 00000000..1769c505 --- /dev/null +++ b/week3/grader.py @@ -0,0 +1,67 @@ +import requests +import json +import numpy as np +from collections import OrderedDict + +class Grader(object): + def __init__(self): + self.submission_page = 'https://www.coursera.org/api/onDemandProgrammingScriptSubmissions.v1' + self.assignment_key = '7DdYfMQFEeevjw7-W7Fr0A' + self.parts = OrderedDict([('98mDT', 'Question2Vec'), + ('nc7RP', 'HitsCount'), + ('bNp90', 'DCGScore'), + ('3gRlQ', 'W2VTokenizedRanks'), + ('mX6wS', 'StarSpaceRanks')]) + self.answers = {key: None for key in self.parts} + + @staticmethod + def ravel_output(output): + ''' + If student accidentally submitted np.array with one + element instead of number, this function will submit + this number instead + ''' + if isinstance(output, np.ndarray) and output.size == 1: + output = output.item(0) + return output + + def submit(self, email, token): + submission = { + "assignmentKey": self.assignment_key, + "submitterEmail": email, + "secret": token, + "parts": {} + } + for part, output in self.answers.items(): + if output is not None: + submission["parts"][part] = {"output": output} + else: + submission["parts"][part] = dict() + request = requests.post(self.submission_page, data=json.dumps(submission)) + response = request.json() + if request.status_code == 201: + print('Submitted to Coursera platform. 
See results on assignment page!') + elif u'details' in response and u'learnerMessage' in response[u'details']: + print(response[u'details'][u'learnerMessage']) + else: + print("Unknown response from Coursera: {}".format(request.status_code)) + print(response) + + def status(self): + print("You want to submit these parts:") + for part_id, part_name in self.parts.items(): + answer = self.answers[part_id] + if answer is None: + answer = '-'*10 + print("Task {}: {}".format(part_name, answer[:100] + '...')) + + def submit_part(self, part, output): + self.answers[part] = output + print("Current answer for task {} is: {}".format(self.parts[part], output[:100] + '...')) + + def submit_tag(self, tag, output): + part_id = [k for k, v in self.parts.items() if v == tag] + if len(part_id) != 1: + raise RuntimeError('cannot match tag with part_id: found {} matches'.format(len(part_id))) + part_id = part_id[0] + self.submit_part(part_id, str(self.ravel_output(output))) diff --git a/week3/util.py b/week3/util.py new file mode 100644 index 00000000..3e22ce19 --- /dev/null +++ b/week3/util.py @@ -0,0 +1,18 @@ +import re +from nltk.corpus import stopwords + +REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;]') +GOOD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]') +STOPWORDS = set(stopwords.words('english')) +def text_prepare(text): + text = text.lower() + text = REPLACE_BY_SPACE_RE.sub(' ', text) + text = GOOD_SYMBOLS_RE.sub('', text) + text = ' '.join([x for x in text.split() if x and x not in STOPWORDS]) + return text.strip() + +def array_to_string(arr): + return '\n'.join(str(num) for num in arr) + +def matrix_to_string(matrix): + return '\n'.join('\t'.join(str(num) for num in line) for line in matrix) \ No newline at end of file diff --git a/week3/week3-Embeddings.ipynb b/week3/week3-Embeddings.ipynb new file mode 100644 index 00000000..66f394d8 --- /dev/null +++ b/week3/week3-Embeddings.ipynb @@ -0,0 +1,1047 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Find duplicate questions on StackOverflow by their embeddings\n", + "\n", + "In this assignment you will learn how to calculate a similarity for pieces of text. Using this approcach you will know how to find duplicate questions from [StackOverflow](https://stackoverflow.com)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Libraries\n", + "\n", + "In this task you will you will need the following libraries:\n", + "- [StarSpace](https://github.com/facebookresearch/StarSpace) — a general-purpose model for efficient learning of entity embeddings from Facebook\n", + "- [Gensim](https://radimrehurek.com/gensim/) — a tool for solving various NLP-related tasks (topic modeling, text representation, ...)\n", + "- [Numpy](http://www.numpy.org) — a package for scientific computing.\n", + "- [scikit-learn](http://scikit-learn.org/stable/index.html) — a tool for data mining and data analysis.\n", + "- [Nltk](http://www.nltk.org) — a platform to work with human language data." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Grading\n", + "We will create a grader instace below and use it to collect your answers. Note that these outputs will be stored locally inside grader and will be uploaded to platform only after running submiting function in the last part of this assignment. If you want to make partial submission, you can run that cell any time you want." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from grader import Grader" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "grader = Grader()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Word embedding\n", + "\n", + "To solve the problem, you will use two different models of embeddings:\n", + "\n", + " - [Pre-trained word vectors](https://code.google.com/archive/p/word2vec/) from Google, trained on part of the Google News dataset (about 100 billion words). The model contains 300-dimensional vectors for 3 million words and phrases. You need to download it by following this [link](https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing).\n", + " - Representations trained with StarSpace on a sample of StackOverflow data. You will need to train them from scratch." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It's always easier to start with pre-trained embeddings. Unpack the pre-trained Google vectors and load them using the function [KeyedVectors.load_word2vec_format](https://radimrehurek.com/gensim/models/keyedvectors.html) from the gensim library with the parameter *binary=True*. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import gensim" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "wv_embeddings = ######### YOUR CODE HERE #############" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### How to work with Google's word2vec embeddings?\n", + "\n", + "Once you have loaded the representations, make sure you can access them. First, you can check whether the loaded embeddings contain a word:\n", + " \n", + " 'word' in wv_embeddings\n", + " \n", + "Second, to get the corresponding embedding you can use square brackets:\n", + "\n", + " wv_embeddings['word']\n", + " \n", + "### Checking that the embeddings are correct \n", + " \n", + "To prevent any errors during the first stage, we can check that the loaded embeddings are correct. You can call the function *check_embeddings*, implemented below, which runs 3 tests:\n", + "1. Find the most similar word for the provided \"positive\" and \"negative\" words.\n", + "2. Find which word from the given list doesn't go with the others.\n", + "3. Find the most similar word for the provided one.\n", + "\n", + "If everything is correct, the function will return the string *These embeddings look good*. Otherwise, you need to validate the previous steps."
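For reference, the loading cell left blank above might look like the following sketch (not the graded solution; the archive name is an assumption, so point it at the file you actually downloaded):

```python
# A minimal sketch of loading the pre-trained Google News vectors with gensim.
# The file name below is an assumption; use the path of your downloaded archive.
import gensim

wv_embeddings = gensim.models.KeyedVectors.load_word2vec_format(
    'GoogleNews-vectors-negative300.bin.gz', binary=True)

print('word' in wv_embeddings)        # membership check
print(wv_embeddings['word'].shape)    # expected shape: (300,)
```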
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def check_embeddings(embeddings):\n", + " error_text = \"Something wrong with your embeddings ('%s test isn't correct).\"\n", + " most_similar = embeddings.most_similar(positive=['woman', 'king'], negative=['man'])\n", + " if len(most_similar) < 1 or most_similar[0][0] != 'queen':\n", + " return error_text % \"Most similar\"\n", + "\n", + " doesnt_match = embeddings.doesnt_match(['breakfast', 'cereal', 'dinner', 'lunch'])\n", + " if doesnt_match != 'cereal':\n", + " return error_text % \"Doesn't match\"\n", + " \n", + " most_similar_to_given = embeddings.most_similar_to_given('music', ['water', 'sound', 'backpack', 'mouse'])\n", + " if most_similar_to_given != 'sound':\n", + " return error_text % \"Most similar to given\"\n", + " \n", + " return \"These embeddings look good.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "print(check_embeddings(wv_embeddings))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## From word to text embeddings\n", + "\n", + "**Task 1 (Question2Vec).** Usually, we have word-based embeddings, but for the task we need to create a representation for the whole question. It could be done in different ways. In our case we will use a mean of all word vectors in the question. Now you need to implement the function *question_to_vec*, which calculates the question representation described above.\n", + "\n", + "Note that there could be words without the corresponding embeddings. In this case, you can just skip these words. If the question doesn't contain any known word with embedding, the function should return a zero vector." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def question_to_vec(question, embeddings, dim=300):\n", + " \"\"\"\n", + " question: a string\n", + " embeddings: dict where the key is a word and a value is its' embedding\n", + " dim: size of the representation\n", + " \n", + " result: vector representation for the question\n", + " \"\"\"\n", + " ######################################\n", + " ######### YOUR CODE HERE #############\n", + " ######################################" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To check the basic correctness of your implementation, run the function *question_to_vec_tests*." 
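A possible shape for the averaging approach described above is sketched below (an illustration, not necessarily the reference solution; the name *question_to_vec_sketch* is used so it does not clash with your graded implementation):

```python
import numpy as np

def question_to_vec_sketch(question, embeddings, dim=300):
    # average the embeddings of the words that are present in the vocabulary
    known = [embeddings[w] for w in question.split() if w in embeddings]
    if not known:
        return np.zeros(dim)       # empty question or only unknown words
    return np.mean(known, axis=0)  # mean of all known word vectors
```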
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def question_to_vec_tests():\n", + " if (np.zeros(300) != question_to_vec('', wv_embeddings)).any():\n", + " return \"You need to return zero vector for empty question.\"\n", + " if (np.zeros(300) != question_to_vec('thereisnosuchword', wv_embeddings)).any():\n", + " return \"You need to return zero vector for question, which consists only from unknown words.\"\n", + " if (wv_embeddings['word'] != question_to_vec('word', wv_embeddings)).any():\n", + " return \"You need to check the corectness of your function.\"\n", + " return \"Basic tests are passed.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "print(question_to_vec_tests())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can submit embeddings for the questions from file *test_embeddings.tsv* to earn the points. In this task you don't need to transform the text of a question somehow." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from util import array_to_string" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "question2vec_result = []\n", + "for question in open('test_embeddings.tsv'):\n", + " question = question.strip()\n", + " answer = question_to_vec(question, wv_embeddings)\n", + " question2vec_result = np.append(question2vec_result, answer)\n", + "\n", + "grader.submit_tag('Question2Vec', array_to_string(question2vec_result))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we have a method to create a representation of any sentence and we are ready for the first evaluation. So, let's check how well our solution (Google's vectors + *question_to_vec*) will work.\n", + "\n", + "## Evaluation of text similarity\n", + "\n", + "We can imagine that if we use good embeddings, the cosine similarity between the duplicate sentences should be less than for the random ones. Overall, for each pair of duplicate sentences we can generate *R* random negative examples and find out the position of the correct duplicate. \n", + "\n", + "For example, we have the question *\"Exceptions What really happens\"* and we are sure that another question *\"How does the catch keyword determine the type of exception that was thrown\"* is a duplicate. But our model doesn't know it and tries to find out the best option also among questions like *\"How Can I Make These Links Rotate in PHP\"*, *\"NSLog array description not memory address\"* and *\"PECL_HTTP not recognised php ubuntu\"*. The goal of the model is to rank all these 4 questions (1 *positive* and *R* = 3 *negative*) in the way that the correct one is in the first place.\n", + "\n", + "However, it is unnatural to count on that the best candidate will be always in the first place. So let us consider the place of the best candidate in the sorted list of candidates and formulate a metric based on it. 
We can fix some *K* — a reasonable number of top-ranked elements and *N* — the number of queries (size of the sample).\n", + "\n", + "### Hits@K\n", + "\n", + "The first simple metric is the fraction of queries whose duplicate appears in the top *K*:\n", + "$$ \text{Hits@K} = \frac{1}{N}\sum_{i=1}^N \, [dup_i \in topK(q_i)]$$\n", + "\n", + "where $q_i$ is the i-th query, $dup_i$ is its duplicate, and topK($q_i$) is the top of the ranked sentences provided by our model.\n", + "\n", + "\n", + "### DCG@K\n", + "The second one is a simplified [DCG metric](https://en.wikipedia.org/wiki/Discounted_cumulative_gain):\n", + "\n", + "$$ DCG = \frac{1}{N} \sum_{i=1}^N\frac{1}{\log_2(1+rank_{dup_i})}\cdot[rank_{dup_i} \le K] $$\n", + "\n", + "where $rank_{dup_i}$ is the position of the duplicate in the sorted list of the nearest sentences for the query $q_i$. According to this metric, the model gets a higher reward for a higher position of the correct answer. If the answer does not appear in the top K at all, the reward is zero. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Evaluation examples\n", + "\n", + "Let's calculate the described metrics for the toy example introduced above. Consider the following ranking of the candidates:\n", + "1. *\"How Can I Make These Links Rotate in PHP\"*\n", + "2. *\"How does the catch keyword determine the type of exception that was thrown\"*\n", + "3. *\"NSLog array description not memory address\"*\n", + "4. *\"PECL_HTTP not recognised php ubuntu\"*\n", + "\n", + "Using the ranking above, calculate the *Hits@K* metric for *K = 1, 2, 4*: \n", + " \n", + "- [K = 1] We consider only the first place and *Hits@1 = 0*\n", + "- [K = 2] We consider the first and the second places and *Hits@2 = 1*\n", + "- [K = 4] We consider the whole list and *Hits@4 = 1*\n", + "\n", + "\n", + "Using the ranking above, calculate the *DCG@K* metric for *K = 1, 2, 4*:\n", + "\n", + "- [K = 1] *DCG = 0* because the correct answer doesn't appear in the top1 list.\n", + "- [K = 2] *DCG = $\frac{1}{\log_2{3}}$*, because $rank_{dup}$ = 2.\n", + "- [K = 4] *DCG = $\frac{1}{\log_2{3}}$*.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Tasks 2 and 3 (HitsCount and DCGScore).** Implement the functions *hits_count* and *dcg_score* as described above. 
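One straightforward reading of the definitions above looks like this sketch (an illustration, not necessarily the reference solution); both functions average over the queries represented by *best_ranks*:

```python
import numpy as np

def hits_count_sketch(best_ranks, k):
    # fraction of queries whose duplicate is ranked within the top k
    return np.mean([rank <= k for rank in best_ranks])

def dcg_score_sketch(best_ranks, k):
    # gain 1 / log2(1 + rank) if the duplicate is within the top k, otherwise 0
    return np.mean([1.0 / np.log2(1 + rank) if rank <= k else 0.0
                    for rank in best_ranks])
```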
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def hits_count(best_ranks, k):\n", + " \"\"\"\n", + " best_ranks: list with ranks for each element (the best rank is 1, the worst — len(best_ranks))\n", + " k: number of top-ranked elements\n", + " \n", + " result: float number\n", + " \"\"\"\n", + " ######################################\n", + " ######### YOUR CODE HERE #############\n", + " ######################################" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Test your code on the tiny examples:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def test_hits():\n", + " answers = ['woman', 'man']\n", + " candidates_ranking = [['woman', 'queen'], ['man', 'king']]\n", + " best_ranks = [1, 1]\n", + " correct_answers = [1, 1]\n", + " for k in range(1, 3):\n", + " if not np.isclose(hits_count(best_ranks, k), correct_answers[k - 1]):\n", + " return \"Check the function.\"\n", + "\n", + " candidates_ranking = [['woman', 'queen'], ['king', 'man']]\n", + " best_ranks = [1, 2]\n", + " correct_answers = [0.5, 1]\n", + " for k in range(1, 3):\n", + " if not np.isclose(hits_count(best_ranks, k), correct_answers[k - 1]):\n", + " return \"Check the function.\"\n", + " return \"Basic tests are passed.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "print(test_hits())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def dcg_score(best_ranks, k):\n", + " \"\"\"\n", + " best_ranks: list with ranks for each element (the best rank is 1, the worst — len(best_ranks))\n", + " k: number of top-ranked elements\n", + " \n", + " result: float number\n", + " \"\"\"\n", + " ######################################\n", + " ######### YOUR CODE HERE #############\n", + " ######################################" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def test_dcg():\n", + " answers = ['woman', 'man']\n", + " candidates_ranking = [['woman', 'queen'], ['man', 'king']]\n", + " best_ranks = [1, 1]\n", + " correct_answers = [1.0, 1.0]\n", + " for k in range(1, 3):\n", + " if not np.isclose(dcg_score(best_ranks, k), correct_answers[k - 1]):\n", + " return \"Check the function.\"\n", + "\n", + " candidates_ranking = [['woman', 'queen'], ['king', 'man']]\n", + " best_ranks = [1, 2]\n", + " correct_answers = [0.5, 0.8154]\n", + " for k in range(1, 3):\n", + " if not np.isclose(dcg_score(best_ranks, k), correct_answers[k - 1], atol=1e-03):\n", + " return \"Check the function.\"\n", + " return \"Basic tests are passed.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "print(test_dcg())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Submit results of the fuctions *hits_count* and *dcg_score* for the following examples to earn the points." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "test_examples = [\n", + " [1],\n", + " [1, 2],\n", + " [2, 1],\n", + " [1, 2, 3],\n", + " [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n", + " [9, 5, 4, 2, 8, 10, 7, 6, 1, 3],\n", + " [4, 3, 5, 1, 9, 10, 7, 8, 2, 6],\n", + " [5, 1, 7, 6, 2, 3, 8, 9, 10, 4],\n", + " [6, 3, 1, 4, 7, 2, 9, 8, 10, 5],\n", + " [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "hits_results = []\n", + "for example in test_examples:\n", + " for k in range(len(example)):\n", + " hits_results.append(hits_count(example, k + 1))\n", + "grader.submit_tag('HitsCount', array_to_string(hits_results))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "dcg_results = []\n", + "for example in test_examples:\n", + " for k in range(len(example)):\n", + " dcg_results.append(dcg_score(example, k + 1))\n", + "grader.submit_tag('DCGScore', array_to_string(dcg_results))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## First solution: pre-trained embeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will work with predefined train, validation and test corpora. All the files are tab-separated, but have a different format:\n", + " - *train* corpus contains similar sentences at the same row.\n", + " - *validation* corpus contains the following columns: *question*, *similar question*, *negative example 1*, *negative example 2*, ... \n", + " - *test* corpus contains the following columns: *question*, *example 1*, *example 2*, ...\n", + "\n", + "Validation corpus will be used for the intermediate validation of models. The test data will be necessary for submitting the quality of your model in the system." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now you should upload *validation* corpus to evaluate current solution." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def upload_corpus(filename):\n", + " data = []\n", + " for line in open(filename):\n", + " data.append(line.strip().split('\\t'))\n", + " return data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "validation = ######### YOUR CODE HERE #############" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from sklearn.metrics.pairwise import cosine_similarity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will use cosine distance to rank candidate questions which you need to implement in the function *rank_questions*. The function should return a sorted list of pairs *(initial position in candidates list, question)*. Index of some pair corresponds to its rank (the first is the best). For example, if the list of candidates was *[a, b, c]* and the most similar is *c*, than *a* and *b*, the functions should return a list *[(2, c), (0, a), (1, b)]*.\n", + "\n", + "Pay attention, that if you will use the function *cosine_similarity* from *sklearn.metrics.pairwise* to calculate similarity. 
It works in a different way: most similar objects has greatest similarity." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def rank_candidates(question, candidates, embeddings, dim=300):\n", + " \"\"\"\n", + " question: a string\n", + " candidates: a list of strings (candidates) which we want to rank\n", + " embeddings: some embeddings\n", + " dim: dimension of the current embeddings\n", + " \n", + " return: a list of pairs (initial position in the list, question)\n", + " \"\"\"\n", + " \n", + " ######################################\n", + " ######### YOUR CODE HERE #############\n", + " ######################################" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Test your code on the tiny examples:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def test_rank_candidates():\n", + " questions = ['converting string to list', 'Sending array via Ajax fails']\n", + " candidates = [['Convert Google results object (pure js) to Python object', \n", + " 'C# create cookie from string and send it',\n", + " 'How to use jQuery AJAX for an outside domain?'], \n", + " ['Getting all list items of an unordered list in PHP', \n", + " 'WPF- How to update the changes in list item of a list', \n", + " 'select2 not displaying search results']]\n", + " results = [[(1, 'C# create cookie from string and send it'), \n", + " (0, 'Convert Google results object (pure js) to Python object'), \n", + " (2, 'How to use jQuery AJAX for an outside domain?')],\n", + " [(0, 'Getting all list items of an unordered list in PHP'), \n", + " (2, 'select2 not displaying search results'), \n", + " (1, 'WPF- How to update the changes in list item of a list')]]\n", + " for question, q_candidates, result in zip(questions, candidates, results):\n", + " ranks = rank_candidates(question, q_candidates, wv_embeddings, 300)\n", + " if not np.all(ranks == result):\n", + " return \"Check the function.\"\n", + " return \"Basic tests are passed.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "print(test_rank_candidates())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can test the quality of the current approach. Run the next two cells to get the results. Pay attention that calculation of similarity between vectors takes time and this calculation is computed approximately in 10 minutes." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "wv_ranking = []\n", + "for line in validation:\n", + " q, *ex = line\n", + " ranks = rank_candidates(q, ex, wv_embeddings)\n", + " wv_ranking.append([r[0] for r in ranks].index(0) + 1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "for k in [1, 5, 10, 100, 500, 1000]:\n", + " print(\"nDCG@%4d: %.3f | Hits@%4d: %.3f\" % (k, dcg_score(wv_ranking, k), k, hits_count(wv_ranking, k)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you did all the steps correctly, you should be frustrated by the received results. Let's try to understand why the quality is so low. First of all, when you work with some data it is necessary to have an idea how the data looks like. 
Print several questions from the data:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "for line in validation[:3]:\n", + " q, *examples = line\n", + " print(q, *examples[:3])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As you can see, we deal with the raw data. It means that we have many punctiation marks, special characters and unlowercased letters. In our case, it could lead to the situation where we can't find some embeddings, e.g. for the word \"grid?\". \n", + "\n", + "To solve this problem you should use the functions *text_prepare* from the previous assignments to prepare the data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from util import text_prepare" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now transform all the questions from the validation set:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "validation_prepared = []\n", + "for line in validation:\n", + " validation_prepared.append([text_prepare(q) for q in line])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's evaluate the approach again after the tokenization:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "wv_prepared_ranking = []\n", + "for line in validation_prepared:\n", + " q, *ex = line\n", + " ranks = rank_candidates(q, ex, wv_embeddings)\n", + " wv_prepared_ranking.append([r[0] for r in ranks].index(0) + 1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "for k in [1, 5, 10, 100, 500, 1000]:\n", + " print(\"nDCG@%4d: %.3f | Hits@%4d: %.3f\" % (k, dcg_score(wv_prepared_ranking, k), \n", + " k, hits_count(wv_prepared_ranking, k)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, tokenize also train and test data, because you will need it in the future:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def tokenize_file(in_, out_):\n", + " out = open(out_, 'w')\n", + " for line in open(in_):\n", + " line = line.strip().split('\\t')\n", + " new_line = [text_prepare(q) for q in line]\n", + " print(*new_line, sep='\\t', file=out)\n", + " out.close()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "######################################\n", + "######### YOUR CODE HERE #############\n", + "######################################" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Task 4 (W2VTokenizedRanks).** For each question from *test.tsv* submit the ranks of the candidates to earn the points. It should take about 3-5 minutes. Pay attention that the function *rank_candidates* returns a ranking, while in this case you should find a position in this ranking. Ranks should start with 1." 
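For orientation, a sketch of the *rank_candidates* function described earlier is shown below (an illustration that reuses *question_to_vec* and *cosine_similarity*, not necessarily the reference solution):

```python
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

def rank_candidates_sketch(question, candidates, embeddings, dim=300):
    # embed the query and every candidate with the mean-of-word-vectors representation
    q_vec = question_to_vec(question, embeddings, dim).reshape(1, -1)
    cand_vecs = np.array([question_to_vec(c, embeddings, dim) for c in candidates])
    # a higher cosine similarity means a better candidate, so sort in decreasing order
    sims = cosine_similarity(q_vec, cand_vecs)[0]
    order = np.argsort(-sims)
    return [(int(i), candidates[i]) for i in order]
```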
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from util import matrix_to_string" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "w2v_ranks_results = []\n", + "for line in open('test_tokenized.txt'):\n", + "    q, *ex = line.strip().split('\\t')\n", + "    ranks = rank_candidates(q, ex, wv_embeddings, 300)\n", + "    ranked_candidates = [r[0] for r in ranks]\n", + "    w2v_ranks_results.append([ranked_candidates.index(i) + 1 for i in range(len(ranked_candidates))])\n", + "    \n", + "grader.submit_tag('W2VTokenizedRanks', matrix_to_string(w2v_ranks_results))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Advanced solution: StarSpace embeddings\n", + "\n", + "Now you are ready to train your own word embeddings! In particular, you need to train embeddings specifically for our task of duplicate detection.\n", + "\n", + "### How does it work, and what's the main difference from word2vec?\n", + "The main point in this section is that StarSpace can be trained specifically for a given task. In contrast to the word2vec model, which trains similar embeddings for words that occur in similar contexts, StarSpace uses embeddings for whole sentences (as a sum of embeddings of words and phrases). Although in both cases we get word embeddings as a result of the training, StarSpace embeddings are trained on supervised data, e.g. a set of similar sentence pairs, and thus they can better suit the task.\n", + "\n", + "In our case, StarSpace should use two types of sentence pairs for training: \"positive\" and \"negative\". \"Positive\" examples are extracted from the train sample (duplicates, high similarity) and \"negative\" examples are generated randomly (low similarity assumed). \n", + "\n", + "### How to choose the best params for the model?\n", + "Normally, you would start with some default choice and then run extensive experiments to compare different strategies. However, we have some recommendations ready to save you time:\n", + "- Be careful to choose a suitable training mode. In this task we want to explore text similarity, which corresponds to *trainMode = 3*.\n", + "- Use adagrad optimization (parameter *adagrad = true*).\n", + "- Set the phrase length to 1 (parameter *ngrams*), because we need embeddings only for words.\n", + "- Don't use a large number of *epochs* (we think that 5 should be enough).\n", + "- Try dimension *dim* equal to 100.\n", + "- To compare embeddings, *cosine similarity* is usually used.\n", + "- Set *minCount* greater than 1 (for example, 2) if you don't want to get embeddings for extremely rare words.\n", + "- The parameter *verbose = true* shows the progress of the training process.\n", + "- Set the parameter *fileFormat* to *labelDoc*.\n", + "- The parameter *negSearchLimit* controls how many negative examples are used during training. We think that 10 will be enough for this task.\n", + "- To speed up training, we recommend setting the *learning rate* to 0.05." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Train StarSpace embeddings for unigrams on the train dataset. You don't need to change the format of the input data; just don't forget to use the tokenized version of the training data. \n", + "\n", + "If you follow the instructions, the training process will take about 1 hour."
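Putting the recommendations above together, the training call might look like the sketch below. The names *train_tokenized.tsv* and *starspace_model* are assumptions (use your own paths), and the StarSpace binary is assumed to be built and available on PATH; the second snippet anticipates the loading step described in the next section.

```python
import os
import numpy as np

# Hypothetical invocation built from the recommendations above (not the only valid choice).
os.system('starspace train -trainFile train_tokenized.tsv -model starspace_model '
          '-trainMode 3 -adagrad true -ngrams 1 -epoch 5 -dim 100 '
          '-similarity "cosine" -minCount 2 -verbose true -fileFormat labelDoc '
          '-negSearchLimit 10 -lr 0.05')

# StarSpace saves the trained vectors to <model>.tsv (a word followed by
# tab-separated floats on each line); they can then be read into a dict:
starspace_embeddings = {}
for line in open('starspace_model.tsv'):   # assumed model file name
    word, *vector = line.strip().split('\t')
    starspace_embeddings[word] = np.array(vector, dtype=np.float32)
```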
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "######### TRAINING HAPPENING HERE #############" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": true + }, + "source": [ + "And now we can compare the new embeddings with the previous ones. You can find the trained word vectors in the file *[model_file_name].tsv*. Load the embeddings from StarSpace into a dict. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "starspace_embeddings = ######### YOUR CODE HERE #############" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "ss_prepared_ranking = []\n", + "for line in validation_prepared:\n", + "    q, *ex = line\n", + "    ranks = rank_candidates(q, ex, starspace_embeddings, 100)\n", + "    ss_prepared_ranking.append([r[0] for r in ranks].index(0) + 1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "for k in [1, 5, 10, 100, 500, 1000]:\n", + "    print(\"nDCG@%4d: %.3f | Hits@%4d: %.3f\" % (k, dcg_score(ss_prepared_ranking, k), \n", + "                                               k, hits_count(ss_prepared_ranking, k)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Because the model is trained for this particular task on supervised data, you should expect to obtain higher quality than with the previous approach. In addition, even though StarSpace's trained vectors have a smaller dimension than word2vec's, they provide better results on this task." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Task 5 (StarSpaceRanks).** For each question from *test.tsv* submit the ranks of the candidates for the trained representation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "starspace_ranks_results = []\n", + "for line in open('test_tokenized.txt'):\n", + "    q, *ex = line.strip().split('\\t')\n", + "    ranks = rank_candidates(q, ex, starspace_embeddings, 100)\n", + "    ranked_candidates = [r[0] for r in ranks]\n", + "    starspace_ranks_results.append([ranked_candidates.index(i) + 1 for i in range(len(ranked_candidates))])\n", + "    \n", + "grader.submit_tag('StarSpaceRanks', matrix_to_string(starspace_ranks_results))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Authorization & Submission\n", + "To submit assignment parts to the Coursera platform, please enter your e-mail and token into the variables below. You can generate the token on this programming assignment's page. Note: the token expires 30 minutes after generation."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "STUDENT_EMAIL = # EMAIL \n", + "STUDENT_TOKEN = # TOKEN \n", + "grader.status()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you want to submit these answers, run cell below" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "grader.submit(STUDENT_EMAIL, STUDENT_TOKEN)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}