Let's implement Word2Vec piece by piece: the sigmoid helper, the naive softmax and negative sampling loss functions, the skip-gram model, and finally the SGD optimizer that ties them together.

Sigmoid Function

```python
def sigmoid(x):
    """
    Compute the sigmoid function for the input here.

    Arguments:
    x -- A scalar or numpy array.

    Return:
    s -- sigmoid(x)
    """

    return s
```

Answer

```python
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
```

Naive Softmax Loss + Gradient

```python
def naiveSoftmaxLossAndGradient(
    centerWordVec,
    outsideWordIdx,
    outsideVectors,
    dataset
):
    """ Naive Softmax loss & gradient function for word2vec models

    Implement the naive softmax loss and gradients between a center word's
    embedding and an outside word's embedding. This will be the building block
    for our word2vec models.

    For those unfamiliar with numpy notation, note that a numpy ndarray with
    a shape of (x, ) is a one-dimensional array, which you can effectively
    treat as a vector with length x.

    Arguments:
    centerWordVec -- numpy ndarray, center word's embedding
                     in shape (word vector length, )
                     (v_c in the pdf handout)
    outsideWordIdx -- integer, the index of the outside word
                      (o of u_o in the pdf handout)
    outsideVectors -- outside vectors in shape (num words in vocab, word vector length)
                      for all words in vocab (transpose of U in the pdf handout)
    dataset -- needed for negative sampling, unused here.

    Return:
    loss -- naive softmax loss
    gradCenterVec -- the gradient with respect to the center word vector
                     in shape (word vector length, )
                     (dJ / dv_c in the pdf handout)
    gradOutsideVecs -- the gradient with respect to all the outside word vectors
                       in shape (num words in vocab, word vector length)
                       (dJ / dU)
    """

    ### YOUR CODE HERE (~6-8 Lines)

    ### Please use the provided softmax function (imported earlier in this file)
    ### This numerically stable implementation helps you avoid issues pertaining
    ### to integer overflow.

    ### END YOUR CODE

    return loss, gradCenterVec, gradOutsideVecs
```

Answer

```python
def naiveSoftmaxLossAndGradient(
    centerWordVec,
    outsideWordIdx,
    outsideVectors,
    dataset
):
    """ Naive Softmax loss & gradient function for word2vec models
    (see the skeleton above for the full docstring). """

    ### YOUR CODE HERE (~6-8 Lines)

    # Predicted distribution over the whole vocabulary: y_hat = softmax(U v_c)
    y_hat = softmax(np.dot(outsideVectors, centerWordVec))

    # One-hot vector for the true outside word
    y = np.zeros(y_hat.shape)
    y[outsideWordIdx] = 1

    # Cross-entropy loss: -log P(o | c)
    loss = -np.log(y_hat[outsideWordIdx])

    # dJ/dv_c = U^T (y_hat - y);  dJ/dU = (y_hat - y) v_c^T
    gradCenterVec = np.dot(outsideVectors.T, y_hat - y)
    gradOutsideVecs = np.outer(y_hat - y, centerWordVec)

    ### END YOUR CODE

    return loss, gradCenterVec, gradOutsideVecs
```
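If you want to convince yourself that the analytic gradients match the loss, a quick finite-difference check on a toy vocabulary is enough. This is a minimal sketch, not part of the assignment: it assumes numpy is imported as np and that the softmax helper and naiveSoftmaxLossAndGradient above are available in scope; the helper finite_diff_center_grad is a name made up here for illustration.

```python
import numpy as np

def finite_diff_center_grad(loss_fn, v_c, eps=1e-6):
    """Numerically estimate dJ/dv_c by perturbing one coordinate at a time."""
    grad = np.zeros_like(v_c)
    for i in range(len(v_c)):
        step = np.zeros_like(v_c)
        step[i] = eps
        grad[i] = (loss_fn(v_c + step) - loss_fn(v_c - step)) / (2 * eps)
    return grad

np.random.seed(0)
vocab_size, dim = 5, 3
U = np.random.randn(vocab_size, dim)   # stands in for outsideVectors
v_c = np.random.randn(dim)             # stands in for centerWordVec
o = 2                                  # index of the "true" outside word

loss, gradCenter, gradOutside = naiveSoftmaxLossAndGradient(v_c, o, U, dataset=None)
numGrad = finite_diff_center_grad(
    lambda v: naiveSoftmaxLossAndGradient(v, o, U, dataset=None)[0], v_c)

# The analytic and numerical gradients should agree to roughly 1e-8 or better.
print(np.max(np.abs(gradCenter - numGrad)))
```

Checking gradOutside works the same way, just perturbing entries of U instead of v_c.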
Negative Sampling Loss + Gradient

```python
def getNegativeSamples(outsideWordIdx, dataset, K):
    """ Samples K indices which are not the outsideWordIdx """

    negSampleWordIndices = [None] * K
    for k in range(K):
        newidx = dataset.sampleTokenIdx()
        while newidx == outsideWordIdx:
            newidx = dataset.sampleTokenIdx()
        negSampleWordIndices[k] = newidx
    return negSampleWordIndices


def negSamplingLossAndGradient(
    centerWordVec,
    outsideWordIdx,
    outsideVectors,
    dataset,
    K=10
):
    """ Negative sampling loss function for word2vec models

    Implement the negative sampling loss and gradients for a centerWordVec
    and an outsideWordIdx word vector as a building block for word2vec
    models. K is the number of negative samples to take.

    Note: The same word may be negatively sampled multiple times. For
    example if an outside word is sampled twice, you will have to
    double count the gradient with respect to this word. Thrice if
    it was sampled three times, and so forth.

    Arguments/Return Specifications: same as naiveSoftmaxLossAndGradient
    """

    # Negative sampling of words is done for you. Do not modify this if you
    # wish to match the autograder and receive points!
    negSampleWordIndices = getNegativeSamples(outsideWordIdx, dataset, K)
    indices = [outsideWordIdx] + negSampleWordIndices

    ### YOUR CODE HERE (~10 Lines)

    ### Please use your implementation of sigmoid in here.

    ### END YOUR CODE

    return loss, gradCenterVec, gradOutsideVecs
```

Answer

```python
def negSamplingLossAndGradient(
    centerWordVec,
    outsideWordIdx,
    outsideVectors,
    dataset,
    K=10
):
    """ Negative sampling loss function for word2vec models
    (see the skeleton above for the full docstring). """

    # Negative sampling of words is done for you. Do not modify this if you
    # wish to match the autograder and receive points!
    negSampleWordIndices = getNegativeSamples(outsideWordIdx, dataset, K)
    indices = [outsideWordIdx] + negSampleWordIndices

    ### YOUR CODE HERE (~10 Lines)

    ### Please use your implementation of sigmoid in here.
    u_o = outsideVectors[outsideWordIdx]        # (word vector length, )
    u_k = outsideVectors[negSampleWordIndices]  # (K, word vector length)
    v_c = centerWordVec

    # J = -log sigmoid(u_o . v_c) - sum_k log sigmoid(-u_k . v_c)
    loss = (-np.log(sigmoid(np.dot(u_o, v_c)))
            - np.sum(np.log(sigmoid(-np.dot(u_k, v_c)))))

    # dJ/dv_c = (sigmoid(u_o . v_c) - 1) u_o + sum_k (1 - sigmoid(-u_k . v_c)) u_k
    gradCenterVec = ((sigmoid(np.dot(u_o, v_c)) - 1) * u_o
                     + np.dot(1 - sigmoid(-np.dot(u_k, v_c)), u_k))

    gradOutsideVecs = np.zeros(outsideVectors.shape)
    # dJ/du_o = (sigmoid(u_o . v_c) - 1) v_c
    gradOutsideVecs[outsideWordIdx, :] = (sigmoid(np.dot(u_o, v_c)) - 1) * v_c

    # Negative samples may repeat, so weight each unique sample's gradient by
    # its count: dJ/du_k = count_k * (1 - sigmoid(-u_k . v_c)) v_c
    unique, firstIdx, counts = np.unique(negSampleWordIndices,
                                         return_index=True, return_counts=True)
    sigmoidUniqueSamplesCenter = sigmoid(-np.dot(u_k, v_c))[firstIdx]
    gradOutsideVecs[unique, :] = -((sigmoidUniqueSamplesCenter - 1) * counts)[:, None] * v_c[None, :]

    ### END YOUR CODE

    return loss, gradCenterVec, gradOutsideVecs
```
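negSamplingLossAndGradient needs a dataset object that exposes sampleTokenIdx(); in the assignment this comes from the provided data loader. To poke at the function in isolation, a tiny stub is enough. This is a minimal sketch under that assumption: the ToyDataset class and its uniform sampling are made up here and are not part of the assignment code, and sigmoid plus the functions above are assumed to be in scope.

```python
import random
import numpy as np

class ToyDataset:
    """Minimal stand-in for the assignment's dataset: samples word indices
    uniformly from a tiny vocabulary (the real loader uses a frequency-based
    distribution)."""
    def __init__(self, vocab_size):
        self.vocab_size = vocab_size

    def sampleTokenIdx(self):
        return random.randint(0, self.vocab_size - 1)

random.seed(0)
np.random.seed(0)

vocab_size, dim = 5, 3
U = np.random.randn(vocab_size, dim)   # outsideVectors
v_c = np.random.randn(dim)             # centerWordVec

loss, gradCenter, gradOutside = negSamplingLossAndGradient(
    v_c, outsideWordIdx=2, outsideVectors=U, dataset=ToyDataset(vocab_size), K=4)

print(loss)                 # a positive scalar
print(gradCenter.shape)     # (3,)
print(gradOutside.shape)    # (5, 3)
```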
Skip-gram Model

```python
def skipgram(currentCenterWord, windowSize, outsideWords, word2Ind,
             centerWordVectors, outsideVectors, dataset,
             word2vecLossAndGradient=naiveSoftmaxLossAndGradient):
    """ Skip-gram model in word2vec

    Implement the skip-gram model in this function.

    Arguments:
    currentCenterWord -- a string of the current center word
    windowSize -- integer, context window size
    outsideWords -- list of no more than 2*windowSize strings, the outside words
    word2Ind -- a dictionary that maps words to their indices in
                the word vector list
    centerWordVectors -- center word vectors (as rows) in shape
                         (num words in vocab, word vector length)
                         for all words in vocab (V in pdf handout)
    outsideVectors -- outside vectors in shape
                      (num words in vocab, word vector length)
                      for all words in vocab (transpose of U in the pdf handout)
    word2vecLossAndGradient -- the loss and gradient function for
                               a prediction vector given the outsideWordIdx
                               word vectors, could be one of the two
                               loss functions you implemented above.

    Return:
    loss -- the loss function value for the skip-gram model
            (J in the pdf handout)
    gradCenterVecs -- the gradient with respect to the center word vectors
                      in shape (num words in vocab, word vector length)
                      (dJ / dv_c in the pdf handout)
    gradOutsideVecs -- the gradient with respect to all the outside word vectors
                       in shape (num words in vocab, word vector length)
                       (dJ / dU)
    """

    loss = 0.0
    gradCenterVecs = np.zeros(centerWordVectors.shape)
    gradOutsideVectors = np.zeros(outsideVectors.shape)

    ### YOUR CODE HERE (~8 Lines)

    ### END YOUR CODE

    return loss, gradCenterVecs, gradOutsideVectors
```

Answer

```python
def skipgram(currentCenterWord, windowSize, outsideWords, word2Ind,
             centerWordVectors, outsideVectors, dataset,
             word2vecLossAndGradient=naiveSoftmaxLossAndGradient):
    """ Skip-gram model in word2vec
    (see the skeleton above for the full docstring). """

    loss = 0.0
    gradCenterVecs = np.zeros(centerWordVectors.shape)
    gradOutsideVectors = np.zeros(outsideVectors.shape)

    ### YOUR CODE HERE (~8 Lines)

    # Look up the center word's vector once, then accumulate the loss and
    # gradients contributed by each outside (context) word.
    centerWordIdx = word2Ind[currentCenterWord]
    centerWordVec = centerWordVectors[centerWordIdx]

    for word in outsideWords:
        outsideWordIdx = word2Ind[word]
        gradloss, gradCenterVec, gradOutsideVecs = word2vecLossAndGradient(
            centerWordVec, outsideWordIdx, outsideVectors, dataset)
        loss += gradloss
        gradCenterVecs[centerWordIdx] += gradCenterVec
        gradOutsideVectors += gradOutsideVecs

    ### END YOUR CODE

    return loss, gradCenterVecs, gradOutsideVectors
```
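One call to skipgram handles a single training window: a center word plus its surrounding context words. Here is a minimal sketch of what that call looks like with a toy vocabulary; the words, window, and random vectors are made up for illustration, and dataset can be None because the naive softmax loss never touches it.

```python
import numpy as np

words = ["the", "quick", "brown", "fox", "jumps"]
word2Ind = {w: i for i, w in enumerate(words)}

np.random.seed(0)
dim = 3
centerWordVectors = np.random.randn(len(words), dim)
outsideVectors = np.random.randn(len(words), dim)

# One window: center word "brown", context ["the", "quick", "fox", "jumps"] (windowSize=2)
loss, gradCenter, gradOutside = skipgram(
    "brown", 2, ["the", "quick", "fox", "jumps"], word2Ind,
    centerWordVectors, outsideVectors, dataset=None,
    word2vecLossAndGradient=naiveSoftmaxLossAndGradient)

print(loss)                # summed loss over the four context words
print(gradCenter.shape)    # (5, 3) -- only the "brown" row is nonzero
print(gradOutside.shape)   # (5, 3)
```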
SGD Optimizer

```python
def sgd(f, x0, step, iterations, postprocessing=None, useSaved=False,
        PRINT_EVERY=10):
    """ Stochastic Gradient Descent

    Implement the stochastic gradient descent method in this function.

    Arguments:
    f -- the function to optimize, it should take a single
         argument and yield two outputs, a loss and the gradient
         with respect to the arguments
    x0 -- the initial point to start SGD from
    step -- the step size for SGD
    iterations -- total iterations to run SGD for
    postprocessing -- postprocessing function for the parameters
                      if necessary. In the case of word2vec we will need to
                      normalize the word vectors to have unit length.
    PRINT_EVERY -- specifies how many iterations to output loss

    Return:
    x -- the parameter value after SGD finishes
    """

    # Anneal learning rate every several iterations
    ANNEAL_EVERY = 20000

    if useSaved:
        start_iter, oldx, state = load_saved_params()
        if start_iter > 0:
            x0 = oldx
            step *= 0.5 ** (start_iter / ANNEAL_EVERY)

        if state:
            random.setstate(state)
    else:
        start_iter = 0

    x = x0

    if not postprocessing:
        postprocessing = lambda x: x

    exploss = None

    for iter in range(start_iter + 1, iterations + 1):
        # You might want to print the progress every few iterations.

        loss = None
        ### YOUR CODE HERE (~2 lines)

        ### END YOUR CODE

        x = postprocessing(x)
        if iter % PRINT_EVERY == 0:
            if not exploss:
                exploss = loss
            else:
                exploss = .95 * exploss + .05 * loss
            print("iter %d: %f" % (iter, exploss))

        if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
            save_params(iter, x)

        if iter % ANNEAL_EVERY == 0:
            step *= 0.5

    return x
```

Answer

```python
def sgd(f, x0, step, iterations, postprocessing=None, useSaved=False,
        PRINT_EVERY=10):
    """ Stochastic Gradient Descent
    (see the skeleton above for the full docstring). """

    # Anneal learning rate every several iterations
    ANNEAL_EVERY = 20000

    if useSaved:
        start_iter, oldx, state = load_saved_params()
        if start_iter > 0:
            x0 = oldx
            step *= 0.5 ** (start_iter / ANNEAL_EVERY)

        if state:
            random.setstate(state)
    else:
        start_iter = 0

    x = x0

    if not postprocessing:
        postprocessing = lambda x: x

    exploss = None

    for iter in range(start_iter + 1, iterations + 1):
        # You might want to print the progress every few iterations.

        loss = None
        ### YOUR CODE HERE (~2 lines)

        # Evaluate the loss and gradient at the current parameters, then take
        # one step in the direction of the negative gradient.
        loss, grad = f(x)
        x -= step * grad

        ### END YOUR CODE

        x = postprocessing(x)
        if iter % PRINT_EVERY == 0:
            if not exploss:
                exploss = loss
            else:
                exploss = .95 * exploss + .05 * loss
            print("iter %d: %f" % (iter, exploss))

        if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
            save_params(iter, x)

        if iter % ANNEAL_EVERY == 0:
            step *= 0.5

    return x
```
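To see the optimizer in isolation, sgd can be run on a simple quadratic whose minimizer is known. This is a minimal sketch, not part of the assignment: with useSaved=False the load_saved_params / save_params helpers are never called, but the loop still references the module-level SAVE_PARAMS_EVERY constant, so we define a placeholder value here (the actual value lives in the assignment's module and is an assumption on our part).

```python
import numpy as np

SAVE_PARAMS_EVERY = 5000  # normally defined at module level in the assignment file

def quadratic(x):
    """Loss ||x||^2 with gradient 2x; the minimizer is the zero vector."""
    return np.sum(x ** 2), 2 * x

x0 = np.array([5.0, -3.0, 2.0])
x_final = sgd(quadratic, x0, step=0.1, iterations=100, PRINT_EVERY=20)
print(x_final)   # should be very close to [0, 0, 0]
```

Note that this implementation updates x in place (x -= step * grad on the same array as x0), which is why the word2vec training code passes in the parameter matrix it wants mutated.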