From add2b4d228ff090d181aab095b6833c47360f49b Mon Sep 17 00:00:00 2001
From: filipapietrat
Date: Mon, 26 Feb 2024 21:44:50 +0000
Subject: [PATCH] [lab-nlp]Filipa T

---
 ...ge-1.ipynb => [challenge-1]Filipa T.ipynb} |  170 +-
 your-code/[challenge-2]Filipa T.ipynb         | 2664 +++++++++++++++++
 your-code/challenge-2.ipynb                   |  320 --
 3 files changed, 2791 insertions(+), 363 deletions(-)
 rename your-code/{challenge-1.ipynb => [challenge-1]Filipa T.ipynb} (73%)
 create mode 100644 your-code/[challenge-2]Filipa T.ipynb
 delete mode 100644 your-code/challenge-2.ipynb

diff --git a/your-code/challenge-1.ipynb b/your-code/[challenge-1]Filipa T.ipynb
similarity index 73%
rename from your-code/challenge-1.ipynb
rename to your-code/[challenge-1]Filipa T.ipynb
index 0808166..16f1eec 100644
--- a/your-code/challenge-1.ipynb
+++ b/your-code/[challenge-1]Filipa T.ipynb
@@ -66,20 +66,42 @@
 },
 {
 "cell_type": "code",
- "execution_count": null,
+ "execution_count": 13,
 "metadata": {},
 "outputs": [],
 "source": [
+ "import pandas as pd\n",
+ "import nltk # Natural Language Toolkit -- this package is quite a mess: it was poorly designed and the documentation is not great\n",
+ "from nltk.stem import WordNetLemmatizer\n",
+ "from nltk.corpus import stopwords\n",
+ "from sklearn.feature_extraction.text import CountVectorizer\n",
+ "from sklearn.feature_extraction.text import TfidfVectorizer"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Ironhack's Q website is\n"
+ ]
+ }
+ ],
+ "source": [
+ "import re\n",
 "def clean_up(s):\n",
- "    \"\"\"\n",
- "    Cleans up numbers, URLs, and special characters from a string.\n",
- "\n",
- "    Args:\n",
- "        s: The string to be cleaned up.\n",
- "\n",
- "    Returns:\n",
- "        A string that has been cleaned up.\n",
- "    \"\"\""
+ "    s = re.sub(r'http\\S+', '', s)\n",
+ "    s = re.sub(r'[^a-zA-Z\\s\\']', ' ', s)\n",
+ "    s = re.sub(r'\\s+', ' ', s)\n",
+ "    return s.strip() \n",
+ " \n",
+ "string = \"@Ironhack's-#Q website 776-is http://ironhack.com [(2018)]\"\n",
+ "clean_string = clean_up(string)\n",
+ "print(clean_string)"
 ]
 },
 {
@@ -101,20 +123,40 @@
 },
 {
 "cell_type": "code",
- "execution_count": null,
+ "execution_count": 18,
 "metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[nltk_data] Downloading package punkt to\n",
+ "[nltk_data]     C:\\Users\\ljant\\AppData\\Roaming\\nltk_data...\n",
+ "[nltk_data]   Package punkt is already up-to-date!\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "['Ironhack', 's', 'Q', 'website', 'is']"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
 "source": [
- "def tokenize(s):\n",
- "    \"\"\"\n",
- "    Tokenize a string.\n",
+ "from nltk.tokenize import word_tokenize\n",
+ "nltk.download('punkt')\n",
 "\n",
- "    Args:\n",
- "        s: String to be tokenized.\n",
+ "def tokenize(s):\n",
+ "    tokens = word_tokenize(s)\n",
+ "    tokens = [token.replace(\"'\", \"\") for token in tokens]\n",
+ "    return tokens\n",
 "\n",
- "    Returns:\n",
- "        A list of words as the result of tokenization.\n",
- "    \"\"\""
+ "tokens = tokenize(clean_string)\n",
+ "tokens"
 ]
 },
 {
@@ -145,20 +187,43 @@
 },
 {
 "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
+ "execution_count": 22,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[nltk_data] Downloading package wordnet to\n",
+ "[nltk_data] 
C:\\Users\\ljant\\AppData\\Roaming\\nltk_data...\n",
+ "[nltk_data]   Package wordnet is already up-to-date!\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "['Ironhack', 's', 'Q', 'website', 'is']"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
 "source": [
- "def stem_and_lemmatize(l):\n",
- "    \"\"\"\n",
- "    Perform stemming and lemmatization on a list of words.\n",
+ "nltk.download('wordnet')\n",
+ "from nltk.stem import WordNetLemmatizer\n",
+ "from nltk.corpus import wordnet\n",
 "\n",
- "    Args:\n",
- "        l: A list of strings.\n",
+ "def stem_and_lemmatize(l):\n",
+ "    lemmatizer = WordNetLemmatizer()\n",
+ "    lemmatized = [lemmatizer.lemmatize(word) for word in l]\n",
+ "    return lemmatized\n",
 "\n",
- "    Returns:\n",
- "        A list of strings after being stemmed and lemmatized.\n",
- "    \"\"\""
+ "string = stem_and_lemmatize(tokens)\n",
+ "string"
 ]
 },
 {
@@ -176,20 +241,39 @@
 },
 {
 "cell_type": "code",
- "execution_count": 1,
+ "execution_count": 23,
 "metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[nltk_data] Downloading package stopwords to\n",
+ "[nltk_data]     C:\\Users\\ljant\\AppData\\Roaming\\nltk_data...\n",
+ "[nltk_data]   Package stopwords is already up-to-date!\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "['Ironhack', 'Q', 'website']"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
 "source": [
- "def remove_stopwords(l):\n",
- "    \"\"\"\n",
- "    Remove English stopwords from a list of strings.\n",
+ "from nltk.corpus import stopwords\n",
+ "nltk.download('stopwords')\n",
 "\n",
- "    Args:\n",
- "        l: A list of strings.\n",
+ "def remove_stopwords(l):\n",
+ "    clean = [word for word in l if word not in stopwords.words()]\n",
+ "    return clean\n",
 "\n",
- "    Returns:\n",
- "        A list of strings after stop words are removed.\n",
- "    \"\"\""
+ "string_clean = remove_stopwords(string)\n",
+ "string_clean"
 ]
 },
 {
@@ -204,7 +288,7 @@
 ],
 "metadata": {
 "kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python 3 (ipykernel)",
 "language": "python",
 "name": "python3"
 },
@@ -218,7 +302,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
- "version": "3.7.3"
+ "version": "3.9.7"
 }
 },
 "nbformat": 4,
diff --git a/your-code/[challenge-2]Filipa T.ipynb b/your-code/[challenge-2]Filipa T.ipynb
new file mode 100644
index 0000000..8539722
--- /dev/null
+++ b/your-code/[challenge-2]Filipa T.ipynb
@@ -0,0 +1,2664 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Challenge 2: Sentiment Analysis\n",
+ "\n",
+ "In this challenge we will learn sentiment analysis and practice performing sentiment analysis on Twitter tweets. \n",
+ "\n",
+ "## Introduction\n",
+ "\n",
+ "Sentiment analysis is to *systematically identify, extract, quantify, and study affective states and subjective information* based on texts ([reference](https://en.wikipedia.org/wiki/Sentiment_analysis)). In simple words, it's to understand whether a person is happy or unhappy when producing the piece of text. Why do we (or rather, companies) care about sentiment in texts? Because by understanding the sentiments in texts, we will be able to know if our customers are happy or unhappy about our products and services. 
If they are unhappy, the subsequent action is to figure out what has caused the unhappiness and make improvements.\n",
+ "\n",
+ "Basic sentiment analysis only understands the *positive* or *negative* (sometimes *neutral* too) polarities of the sentiment. More advanced sentiment analysis will also consider dimensions such as agreement, subjectivity, confidence, irony, and so on. In this challenge we will conduct the basic positive vs negative sentiment analysis based on real Twitter tweets.\n",
+ "\n",
+ "NLTK comes with a [sentiment analysis package](https://www.nltk.org/api/nltk.sentiment.html). This package is great for dummies to perform sentiment analysis because it requires only the textual data to make predictions. For example:\n",
+ "\n",
+ "```python\n",
+ ">>> from nltk.sentiment.vader import SentimentIntensityAnalyzer\n",
+ ">>> txt = \"Ironhack is a Global Tech School ranked num 2 worldwide. Our mission is to help people transform their careers and join a thriving community of tech professionals that love what they do.\"\n",
+ ">>> analyzer = SentimentIntensityAnalyzer()\n",
+ ">>> analyzer.polarity_scores(txt)\n",
+ "{'neg': 0.0, 'neu': 0.741, 'pos': 0.259, 'compound': 0.8442}\n",
+ "```\n",
+ "\n",
+ "In this challenge, however, you will not use NLTK's sentiment analysis package because in your Machine Learning training in the past 2 weeks you have learned how to make predictions more accurate than that. The [tweets data](https://www.kaggle.com/kazanova/sentiment140) we will be using today are already coded for the positive/negative sentiment. You will be able to use the Naïve Bayes classifier you learned in the lesson to predict the sentiment of tweets based on the labels."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Conducting Sentiment Analysis\n",
+ "\n",
+ "### Loading and Exploring Data\n",
+ "\n",
+ "The dataset we'll be using today is located on Kaggle (https://www.kaggle.com/kazanova/sentiment140). Once you have downloaded and imported the dataset, you will need to define the column names: df.columns = ['target','id','date','flag','user','text']\n",
+ "\n",
+ "*Notes:* \n",
+ "\n",
+ "* The dataset is huuuuge (1.6m tweets). When you develop your data analysis code, you can sample a subset of the data (e.g. 20k records) so that you will save a lot of time when you test your code."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import nltk\n",
+ "from nltk.stem import WordNetLemmatizer\n",
+ "from nltk.corpus import stopwords\n",
+ "from sklearn.feature_extraction.text import CountVectorizer\n",
+ "from sklearn.feature_extraction.text import TfidfVectorizer"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
targetiddateflagusertext
001467810369Mon Apr 06 22:19:45 PDT 2009NO_QUERY_TheSpecialOne_@switchfoot http://twitpic.com/2y1zl - Awww, t...
101467810672Mon Apr 06 22:19:49 PDT 2009NO_QUERYscotthamiltonis upset that he can't update his Facebook by ...
201467810917Mon Apr 06 22:19:53 PDT 2009NO_QUERYmattycus@Kenichan I dived many times for the ball. Man...
301467811184Mon Apr 06 22:19:57 PDT 2009NO_QUERYElleCTFmy whole body feels itchy and like its on fire
401467811193Mon Apr 06 22:19:57 PDT 2009NO_QUERYKaroli@nationwideclass no, it's not behaving at all....
.....................
1999501556975331Sun Apr 19 01:19:14 PDT 2009NO_QUERYTOMurdockPapersNot much time off this weekend, work trip to M...
1999601556976068Sun Apr 19 01:19:30 PDT 2009NO_QUERYnikibennnOne more day of holidays
1999701556976167Sun Apr 19 01:19:32 PDT 2009NO_QUERYeifflesummerfeeling so down right now .. i hate you DAMN H...
1999801556976222Sun Apr 19 01:19:34 PDT 2009NO_QUERYlomobabesgeez,i hv to READ the whole book of personalit...
1999901556976246Sun Apr 19 01:19:34 PDT 2009NO_QUERYthatsblue2uI threw my sign at donnie and he bent over to ...
\n", + "

20000 rows × 6 columns

\n", + "
" + ], + "text/plain": [ + " target id date flag \\\n", + "0 0 1467810369 Mon Apr 06 22:19:45 PDT 2009 NO_QUERY \n", + "1 0 1467810672 Mon Apr 06 22:19:49 PDT 2009 NO_QUERY \n", + "2 0 1467810917 Mon Apr 06 22:19:53 PDT 2009 NO_QUERY \n", + "3 0 1467811184 Mon Apr 06 22:19:57 PDT 2009 NO_QUERY \n", + "4 0 1467811193 Mon Apr 06 22:19:57 PDT 2009 NO_QUERY \n", + "... ... ... ... ... \n", + "19995 0 1556975331 Sun Apr 19 01:19:14 PDT 2009 NO_QUERY \n", + "19996 0 1556976068 Sun Apr 19 01:19:30 PDT 2009 NO_QUERY \n", + "19997 0 1556976167 Sun Apr 19 01:19:32 PDT 2009 NO_QUERY \n", + "19998 0 1556976222 Sun Apr 19 01:19:34 PDT 2009 NO_QUERY \n", + "19999 0 1556976246 Sun Apr 19 01:19:34 PDT 2009 NO_QUERY \n", + "\n", + " user text \n", + "0 _TheSpecialOne_ @switchfoot http://twitpic.com/2y1zl - Awww, t... \n", + "1 scotthamilton is upset that he can't update his Facebook by ... \n", + "2 mattycus @Kenichan I dived many times for the ball. Man... \n", + "3 ElleCTF my whole body feels itchy and like its on fire \n", + "4 Karoli @nationwideclass no, it's not behaving at all.... \n", + "... ... ... \n", + "19995 TOMurdockPapers Not much time off this weekend, work trip to M... \n", + "19996 nikibennn One more day of holidays \n", + "19997 eifflesummer feeling so down right now .. i hate you DAMN H... \n", + "19998 lomobabes geez,i hv to READ the whole book of personalit... \n", + "19999 thatsblue2u I threw my sign at donnie and he bent over to ... \n", + "\n", + "[20000 rows x 6 columns]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import pandas as pd\n", + "\n", + "data = pd.read_csv(r'C:\\Users\\ljant\\Downloads\\archive\\training.1600000.processed.noemoticon.csv', encoding='ISO-8859-1',\n", + " header=None, names=['target', 'id', 'date', 'flag', 'user', 'text'])\n", + "data\n", + "\n", + "df = data[:20000]\n", + "df" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Prepare Textual Data for Sentiment Analysis\n", + "\n", + "Now, apply the functions you have written in Challenge 1 to your whole data set. These functions include:\n", + "\n", + "* `clean_up()`\n", + "\n", + "* `tokenize()`\n", + "\n", + "* `stem_and_lemmatize()`\n", + "\n", + "* `remove_stopwords()`\n", + "\n", + "Create a new column called `text_processed` in the dataframe to contain the processed data. At the end, your `text_processed` column should contain lists of word tokens that are cleaned up. Your data should look like below:\n", + "\n", + "![Processed Data](data-cleaning-results.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "1. Tokenize" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\ljant\\AppData\\Local\\Temp/ipykernel_11760/2297630043.py:6: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " df['text_processed'] = df.apply(tokenizer_and_remove_punctuation,axis=1)\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
targetiddateflagusertexttext_processed
001467810369Mon Apr 06 22:19:45 PDT 2009NO_QUERY_TheSpecialOne_@switchfoot http://twitpic.com/2y1zl - Awww, t...[switchfoot, http, awww, that, a, bummer, you,...
101467810672Mon Apr 06 22:19:49 PDT 2009NO_QUERYscotthamiltonis upset that he can't update his Facebook by ...[is, upset, that, he, ca, update, his, faceboo...
201467810917Mon Apr 06 22:19:53 PDT 2009NO_QUERYmattycus@Kenichan I dived many times for the ball. Man...[kenichan, i, dived, many, times, for, the, ba...
301467811184Mon Apr 06 22:19:57 PDT 2009NO_QUERYElleCTFmy whole body feels itchy and like its on fire[my, whole, body, feels, itchy, and, like, its...
401467811193Mon Apr 06 22:19:57 PDT 2009NO_QUERYKaroli@nationwideclass no, it's not behaving at all....[nationwideclass, no, it, not, behaving, at, a...
\n", + "
" + ], + "text/plain": [ + " target id date flag \\\n", + "0 0 1467810369 Mon Apr 06 22:19:45 PDT 2009 NO_QUERY \n", + "1 0 1467810672 Mon Apr 06 22:19:49 PDT 2009 NO_QUERY \n", + "2 0 1467810917 Mon Apr 06 22:19:53 PDT 2009 NO_QUERY \n", + "3 0 1467811184 Mon Apr 06 22:19:57 PDT 2009 NO_QUERY \n", + "4 0 1467811193 Mon Apr 06 22:19:57 PDT 2009 NO_QUERY \n", + "\n", + " user text \\\n", + "0 _TheSpecialOne_ @switchfoot http://twitpic.com/2y1zl - Awww, t... \n", + "1 scotthamilton is upset that he can't update his Facebook by ... \n", + "2 mattycus @Kenichan I dived many times for the ball. Man... \n", + "3 ElleCTF my whole body feels itchy and like its on fire \n", + "4 Karoli @nationwideclass no, it's not behaving at all.... \n", + "\n", + " text_processed \n", + "0 [switchfoot, http, awww, that, a, bummer, you,... \n", + "1 [is, upset, that, he, ca, update, his, faceboo... \n", + "2 [kenichan, i, dived, many, times, for, the, ba... \n", + "3 [my, whole, body, feels, itchy, and, like, its... \n", + "4 [nationwideclass, no, it, not, behaving, at, a... " + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from nltk.tokenize import word_tokenize\n", + "\n", + "def tokenizer_and_remove_punctuation(row):\n", + " tokens = word_tokenize(row['text'])\n", + " return [word.lower() for word in tokens if word.isalpha()]\n", + "df['text_processed'] = df.apply(tokenizer_and_remove_punctuation,axis=1)\n", + "df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[nltk_data] Downloading package wordnet to\n", + "[nltk_data] C:\\Users\\ljant\\AppData\\Roaming\\nltk_data...\n", + "[nltk_data] Package wordnet is already up-to-date!\n", + "C:\\Users\\ljant\\AppData\\Local\\Temp/ipykernel_11760/3375511815.py:16: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " df['text_processed'] = df.apply(lemmatizer_with_pos,axis=1)\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
targetiddateflagusertexttext_processed
001467810369Mon Apr 06 22:19:45 PDT 2009NO_QUERY_TheSpecialOne_@switchfoot http://twitpic.com/2y1zl - Awww, t...[switchfoot, http, awww, that, a, bummer, you,...
101467810672Mon Apr 06 22:19:49 PDT 2009NO_QUERYscotthamiltonis upset that he can't update his Facebook by ...[be, upset, that, he, ca, update, his, faceboo...
201467810917Mon Apr 06 22:19:53 PDT 2009NO_QUERYmattycus@Kenichan I dived many times for the ball. Man...[kenichan, i, dive, many, time, for, the, ball...
301467811184Mon Apr 06 22:19:57 PDT 2009NO_QUERYElleCTFmy whole body feels itchy and like its on fire[my, whole, body, feel, itchy, and, like, it, ...
401467811193Mon Apr 06 22:19:57 PDT 2009NO_QUERYKaroli@nationwideclass no, it's not behaving at all....[nationwideclass, no, it, not, behaving, at, a...
\n", + "
" + ], + "text/plain": [ + " target id date flag \\\n", + "0 0 1467810369 Mon Apr 06 22:19:45 PDT 2009 NO_QUERY \n", + "1 0 1467810672 Mon Apr 06 22:19:49 PDT 2009 NO_QUERY \n", + "2 0 1467810917 Mon Apr 06 22:19:53 PDT 2009 NO_QUERY \n", + "3 0 1467811184 Mon Apr 06 22:19:57 PDT 2009 NO_QUERY \n", + "4 0 1467811193 Mon Apr 06 22:19:57 PDT 2009 NO_QUERY \n", + "\n", + " user text \\\n", + "0 _TheSpecialOne_ @switchfoot http://twitpic.com/2y1zl - Awww, t... \n", + "1 scotthamilton is upset that he can't update his Facebook by ... \n", + "2 mattycus @Kenichan I dived many times for the ball. Man... \n", + "3 ElleCTF my whole body feels itchy and like its on fire \n", + "4 Karoli @nationwideclass no, it's not behaving at all.... \n", + "\n", + " text_processed \n", + "0 [switchfoot, http, awww, that, a, bummer, you,... \n", + "1 [be, upset, that, he, ca, update, his, faceboo... \n", + "2 [kenichan, i, dive, many, time, for, the, ball... \n", + "3 [my, whole, body, feel, itchy, and, like, it, ... \n", + "4 [nationwideclass, no, it, not, behaving, at, a... " + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "nltk.download('wordnet')\n", + "from nltk.stem import WordNetLemmatizer\n", + "from nltk.corpus import wordnet\n", + "\n", + "def get_wordnet_pos(word):\n", + " tag = nltk.pos_tag([word], lang='eng')[0][1][0].upper()\n", + " tag_dict = {\"J\": wordnet.ADJ,\n", + " \"N\": wordnet.NOUN,\n", + " \"V\": wordnet.VERB,\n", + " \"R\": wordnet.ADV}\n", + " return tag_dict.get(tag, wordnet.NOUN)\n", + "\n", + "lemmatizer = WordNetLemmatizer()\n", + "def lemmatizer_with_pos(row):\n", + " return [lemmatizer.lemmatize(word,get_wordnet_pos(word)) for word in row['text_processed']]\n", + "df['text_processed'] = df.apply(lemmatizer_with_pos,axis=1)\n", + "df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[nltk_data] Downloading package stopwords to\n", + "[nltk_data] C:\\Users\\ljant\\AppData\\Roaming\\nltk_data...\n", + "[nltk_data] Package stopwords is already up-to-date!\n", + "C:\\Users\\ljant\\AppData\\Local\\Temp/ipykernel_11760/4206111715.py:6: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " df['text_processed'] = df.apply(remove_sw,axis=1)\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
targetiddateflagusertexttext_processed
001467810369Mon Apr 06 22:19:45 PDT 2009NO_QUERY_TheSpecialOne_@switchfoot http://twitpic.com/2y1zl - Awww, t...[carr, http, shoulda, switchfoot, bummer, davi...
101467810672Mon Apr 06 22:19:49 PDT 2009NO_QUERYscotthamiltonis upset that he can't update his Facebook by ...[update, cry, texting, today, result, facebook...
201467810917Mon Apr 06 22:19:53 PDT 2009NO_QUERYmattycus@Kenichan I dived many times for the ball. Man...[kenichan, save, manage, dive, time, rest, bou...
301467811184Mon Apr 06 22:19:57 PDT 2009NO_QUERYElleCTFmy whole body feels itchy and like its on fire[body, itchy, fire, feel]
401467811193Mon Apr 06 22:19:57 PDT 2009NO_QUERYKaroli@nationwideclass no, it's not behaving at all....[behaving, nationwideclass, mad]
\n", + "
" + ], + "text/plain": [ + " target id date flag \\\n", + "0 0 1467810369 Mon Apr 06 22:19:45 PDT 2009 NO_QUERY \n", + "1 0 1467810672 Mon Apr 06 22:19:49 PDT 2009 NO_QUERY \n", + "2 0 1467810917 Mon Apr 06 22:19:53 PDT 2009 NO_QUERY \n", + "3 0 1467811184 Mon Apr 06 22:19:57 PDT 2009 NO_QUERY \n", + "4 0 1467811193 Mon Apr 06 22:19:57 PDT 2009 NO_QUERY \n", + "\n", + " user text \\\n", + "0 _TheSpecialOne_ @switchfoot http://twitpic.com/2y1zl - Awww, t... \n", + "1 scotthamilton is upset that he can't update his Facebook by ... \n", + "2 mattycus @Kenichan I dived many times for the ball. Man... \n", + "3 ElleCTF my whole body feels itchy and like its on fire \n", + "4 Karoli @nationwideclass no, it's not behaving at all.... \n", + "\n", + " text_processed \n", + "0 [carr, http, shoulda, switchfoot, bummer, davi... \n", + "1 [update, cry, texting, today, result, facebook... \n", + "2 [kenichan, save, manage, dive, time, rest, bou... \n", + "3 [body, itchy, fire, feel] \n", + "4 [behaving, nationwideclass, mad] " + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from nltk.corpus import stopwords\n", + "nltk.download('stopwords')\n", + "\n", + "def remove_sw(row):\n", + " return list(set(row['text_processed']).difference(stopwords.words()))\n", + "df['text_processed'] = df.apply(remove_sw,axis=1)\n", + "df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\ljant\\AppData\\Local\\Temp/ipykernel_11760/1833788326.py:3: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " df['text_processed'] = df.apply(re_blob,axis=1)\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
targetiddateflagusertexttext_processed
001467810369Mon Apr 06 22:19:45 PDT 2009NO_QUERY_TheSpecialOne_@switchfoot http://twitpic.com/2y1zl - Awww, t...carr http shoulda switchfoot bummer david day ...
101467810672Mon Apr 06 22:19:49 PDT 2009NO_QUERYscotthamiltonis upset that he can't update his Facebook by ...update cry texting today result facebook blah ...
201467810917Mon Apr 06 22:19:53 PDT 2009NO_QUERYmattycus@Kenichan I dived many times for the ball. Man...kenichan save manage dive time rest bound ball
301467811184Mon Apr 06 22:19:57 PDT 2009NO_QUERYElleCTFmy whole body feels itchy and like its on firebody itchy fire feel
401467811193Mon Apr 06 22:19:57 PDT 2009NO_QUERYKaroli@nationwideclass no, it's not behaving at all....behaving nationwideclass mad
\n", + "
" + ], + "text/plain": [ + " target id date flag \\\n", + "0 0 1467810369 Mon Apr 06 22:19:45 PDT 2009 NO_QUERY \n", + "1 0 1467810672 Mon Apr 06 22:19:49 PDT 2009 NO_QUERY \n", + "2 0 1467810917 Mon Apr 06 22:19:53 PDT 2009 NO_QUERY \n", + "3 0 1467811184 Mon Apr 06 22:19:57 PDT 2009 NO_QUERY \n", + "4 0 1467811193 Mon Apr 06 22:19:57 PDT 2009 NO_QUERY \n", + "\n", + " user text \\\n", + "0 _TheSpecialOne_ @switchfoot http://twitpic.com/2y1zl - Awww, t... \n", + "1 scotthamilton is upset that he can't update his Facebook by ... \n", + "2 mattycus @Kenichan I dived many times for the ball. Man... \n", + "3 ElleCTF my whole body feels itchy and like its on fire \n", + "4 Karoli @nationwideclass no, it's not behaving at all.... \n", + "\n", + " text_processed \n", + "0 carr http shoulda switchfoot bummer david day ... \n", + "1 update cry texting today result facebook blah ... \n", + "2 kenichan save manage dive time rest bound ball \n", + "3 body itchy fire feel \n", + "4 behaving nationwideclass mad " + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def re_blob(row):\n", + " return \" \".join(row['text_processed'])\n", + "df['text_processed'] = df.apply(re_blob,axis=1)\n", + "df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Creating Bag of Words\n", + "\n", + "The purpose of this step is to create a [bag of words](https://en.wikipedia.org/wiki/Bag-of-words_model) from the processed data. The bag of words contains all the unique words in your whole text body (a.k.a. *corpus*) with the number of occurrence of each word. It will allow you to understand which words are the most important features across the whole corpus.\n", + "\n", + "Also, you can imagine you will have a massive set of words. The less important words (i.e. those of very low number of occurrence) do not contribute much to the sentiment. Therefore, you only need to use the most important words to build your feature set in the next step. In our case, we will use the top 5,000 words with the highest frequency to build the features.\n", + "\n", + "In the cell below, combine all the words in `text_processed` and calculate the frequency distribution of all words. A convenient library to calculate the term frequency distribution is NLTK's `FreqDist` class ([documentation](https://www.nltk.org/api/nltk.html#module-nltk.probability)). Then select the top 5,000 words from the frequency distribution." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " 
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " 
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " 
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
aaaaaaaaahaaronabandonabbyabilityabitabruzzoabsolute...yummyzaczachzackzackalltimelowzerozipzombiezonezoo
00000000000...0000000000
10000000000...0000000000
20000000000...0000000000
30000000000...0000000000
40000000000...0000000000
50000000000...0000000000
60000000000...0000000000
70000000000...0000000000
80000000000...0000000000
90000000000...0000000000
100000000000...0000000000
110000000000...0000000000
120000000000...0000000000
130000000000...0000000000
140000000000...0100000000
150000000000...0000000000
160000000000...0000000000
170000000000...0000000000
180000000000...0000000000
190000000000...0000000000
200000000000...0000000000
210000000000...0000000000
220000000000...0000000000
230000000000...0000000000
240000000000...0000000000
250000000000...0000000000
260000000000...0000000000
270000000000...0000000000
280000000000...0000000000
290000000000...0000000000
300000000000...0000000000
310000000000...0000000000
320000000000...0000000000
330000000000...0000000000
340000000000...0000000000
350000000000...0000000000
360000000000...0000000000
370000000000...0000000000
380000000000...0000000000
390000000000...0000000000
400000000000...0000000000
410000000000...0000000000
420000000000...0000000000
430000000000...0000000000
440000000000...0000000000
450000000000...0000000000
460000000000...0000000000
470000000000...0000000000
480000000000...0000000000
490000000000...0000000000
\n", + "

50 rows × 5000 columns

\n", + "
" + ], + "text/plain": [ + " aa aaa aaaah aaron abandon abby ability abit abruzzo absolute \\\n", + "0 0 0 0 0 0 0 0 0 0 0 \n", + "1 0 0 0 0 0 0 0 0 0 0 \n", + "2 0 0 0 0 0 0 0 0 0 0 \n", + "3 0 0 0 0 0 0 0 0 0 0 \n", + "4 0 0 0 0 0 0 0 0 0 0 \n", + "5 0 0 0 0 0 0 0 0 0 0 \n", + "6 0 0 0 0 0 0 0 0 0 0 \n", + "7 0 0 0 0 0 0 0 0 0 0 \n", + "8 0 0 0 0 0 0 0 0 0 0 \n", + "9 0 0 0 0 0 0 0 0 0 0 \n", + "10 0 0 0 0 0 0 0 0 0 0 \n", + "11 0 0 0 0 0 0 0 0 0 0 \n", + "12 0 0 0 0 0 0 0 0 0 0 \n", + "13 0 0 0 0 0 0 0 0 0 0 \n", + "14 0 0 0 0 0 0 0 0 0 0 \n", + "15 0 0 0 0 0 0 0 0 0 0 \n", + "16 0 0 0 0 0 0 0 0 0 0 \n", + "17 0 0 0 0 0 0 0 0 0 0 \n", + "18 0 0 0 0 0 0 0 0 0 0 \n", + "19 0 0 0 0 0 0 0 0 0 0 \n", + "20 0 0 0 0 0 0 0 0 0 0 \n", + "21 0 0 0 0 0 0 0 0 0 0 \n", + "22 0 0 0 0 0 0 0 0 0 0 \n", + "23 0 0 0 0 0 0 0 0 0 0 \n", + "24 0 0 0 0 0 0 0 0 0 0 \n", + "25 0 0 0 0 0 0 0 0 0 0 \n", + "26 0 0 0 0 0 0 0 0 0 0 \n", + "27 0 0 0 0 0 0 0 0 0 0 \n", + "28 0 0 0 0 0 0 0 0 0 0 \n", + "29 0 0 0 0 0 0 0 0 0 0 \n", + "30 0 0 0 0 0 0 0 0 0 0 \n", + "31 0 0 0 0 0 0 0 0 0 0 \n", + "32 0 0 0 0 0 0 0 0 0 0 \n", + "33 0 0 0 0 0 0 0 0 0 0 \n", + "34 0 0 0 0 0 0 0 0 0 0 \n", + "35 0 0 0 0 0 0 0 0 0 0 \n", + "36 0 0 0 0 0 0 0 0 0 0 \n", + "37 0 0 0 0 0 0 0 0 0 0 \n", + "38 0 0 0 0 0 0 0 0 0 0 \n", + "39 0 0 0 0 0 0 0 0 0 0 \n", + "40 0 0 0 0 0 0 0 0 0 0 \n", + "41 0 0 0 0 0 0 0 0 0 0 \n", + "42 0 0 0 0 0 0 0 0 0 0 \n", + "43 0 0 0 0 0 0 0 0 0 0 \n", + "44 0 0 0 0 0 0 0 0 0 0 \n", + "45 0 0 0 0 0 0 0 0 0 0 \n", + "46 0 0 0 0 0 0 0 0 0 0 \n", + "47 0 0 0 0 0 0 0 0 0 0 \n", + "48 0 0 0 0 0 0 0 0 0 0 \n", + "49 0 0 0 0 0 0 0 0 0 0 \n", + "\n", + " ... yummy zac zach zack zackalltimelow zero zip zombie zone zoo \n", + "0 ... 0 0 0 0 0 0 0 0 0 0 \n", + "1 ... 0 0 0 0 0 0 0 0 0 0 \n", + "2 ... 0 0 0 0 0 0 0 0 0 0 \n", + "3 ... 0 0 0 0 0 0 0 0 0 0 \n", + "4 ... 0 0 0 0 0 0 0 0 0 0 \n", + "5 ... 0 0 0 0 0 0 0 0 0 0 \n", + "6 ... 0 0 0 0 0 0 0 0 0 0 \n", + "7 ... 0 0 0 0 0 0 0 0 0 0 \n", + "8 ... 0 0 0 0 0 0 0 0 0 0 \n", + "9 ... 0 0 0 0 0 0 0 0 0 0 \n", + "10 ... 0 0 0 0 0 0 0 0 0 0 \n", + "11 ... 0 0 0 0 0 0 0 0 0 0 \n", + "12 ... 0 0 0 0 0 0 0 0 0 0 \n", + "13 ... 0 0 0 0 0 0 0 0 0 0 \n", + "14 ... 0 1 0 0 0 0 0 0 0 0 \n", + "15 ... 0 0 0 0 0 0 0 0 0 0 \n", + "16 ... 0 0 0 0 0 0 0 0 0 0 \n", + "17 ... 0 0 0 0 0 0 0 0 0 0 \n", + "18 ... 0 0 0 0 0 0 0 0 0 0 \n", + "19 ... 0 0 0 0 0 0 0 0 0 0 \n", + "20 ... 0 0 0 0 0 0 0 0 0 0 \n", + "21 ... 0 0 0 0 0 0 0 0 0 0 \n", + "22 ... 0 0 0 0 0 0 0 0 0 0 \n", + "23 ... 0 0 0 0 0 0 0 0 0 0 \n", + "24 ... 0 0 0 0 0 0 0 0 0 0 \n", + "25 ... 0 0 0 0 0 0 0 0 0 0 \n", + "26 ... 0 0 0 0 0 0 0 0 0 0 \n", + "27 ... 0 0 0 0 0 0 0 0 0 0 \n", + "28 ... 0 0 0 0 0 0 0 0 0 0 \n", + "29 ... 0 0 0 0 0 0 0 0 0 0 \n", + "30 ... 0 0 0 0 0 0 0 0 0 0 \n", + "31 ... 0 0 0 0 0 0 0 0 0 0 \n", + "32 ... 0 0 0 0 0 0 0 0 0 0 \n", + "33 ... 0 0 0 0 0 0 0 0 0 0 \n", + "34 ... 0 0 0 0 0 0 0 0 0 0 \n", + "35 ... 0 0 0 0 0 0 0 0 0 0 \n", + "36 ... 0 0 0 0 0 0 0 0 0 0 \n", + "37 ... 0 0 0 0 0 0 0 0 0 0 \n", + "38 ... 0 0 0 0 0 0 0 0 0 0 \n", + "39 ... 0 0 0 0 0 0 0 0 0 0 \n", + "40 ... 0 0 0 0 0 0 0 0 0 0 \n", + "41 ... 0 0 0 0 0 0 0 0 0 0 \n", + "42 ... 0 0 0 0 0 0 0 0 0 0 \n", + "43 ... 0 0 0 0 0 0 0 0 0 0 \n", + "44 ... 0 0 0 0 0 0 0 0 0 0 \n", + "45 ... 0 0 0 0 0 0 0 0 0 0 \n", + "46 ... 0 0 0 0 0 0 0 0 0 0 \n", + "47 ... 0 0 0 0 0 0 0 0 0 0 \n", + "48 ... 0 0 0 0 0 0 0 0 0 0 \n", + "49 ... 
0 0 0 0 0 0 0 0 0 0 \n", + "\n", + "[50 rows x 5000 columns]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "bow_vect = CountVectorizer(max_features=5000)\n", + "X = bow_vect.fit_transform(df['text_processed']).toarray()\n", + "\n", + "as_df = pd.DataFrame(X,columns=bow_vect.get_feature_names_out())\n", + "as_df.head(50)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Building Features\n", + "\n", + "Now let's build the features. Using the top 5,000 words, create a 2-dimensional matrix to record whether each of those words is contained in each document (tweet). Then you also have an output column to indicate whether the sentiment in each tweet is positive. For example, assuming your bag of words has 5 items (`['one', 'two', 'three', 'four', 'five']`) out of 4 documents (`['A', 'B', 'C', 'D']`), your feature set is essentially:\n", + "\n", + "| Doc | one | two | three | four | five | is_positive |\n", + "|---|---|---|---|---|---|---|\n", + "| A | True | False | False | True | False | True |\n", + "| B | False | False | False | True | True | False |\n", + "| C | False | True | False | False | False | True |\n", + "| D | True | False | False | False | True | False|\n", + "\n", + "However, because the `nltk.NaiveBayesClassifier.train` class we will use in the next step does not work with Pandas dataframe, the structure of your feature set should be converted to the Python list looking like below:\n", + "\n", + "```python\n", + "[\n", + "\t({\n", + "\t\t'one': True,\n", + "\t\t'two': False,\n", + "\t\t'three': False,\n", + "\t\t'four': True,\n", + "\t\t'five': False\n", + "\t}, True),\n", + "\t({\n", + "\t\t'one': False,\n", + "\t\t'two': False,\n", + "\t\t'three': False,\n", + "\t\t'four': True,\n", + "\t\t'five': True\n", + "\t}, False),\n", + "\t({\n", + "\t\t'one': False,\n", + "\t\t'two': True,\n", + "\t\t'three': False,\n", + "\t\t'four': False,\n", + "\t\t'five': False\n", + "\t}, True),\n", + "\t({\n", + "\t\t'one': True,\n", + "\t\t'two': False,\n", + "\t\t'three': False,\n", + "\t\t'four': False,\n", + "\t\t'five': True\n", + "\t}, False)\n", + "]\n", + "```\n", + "\n", + "To help you in this step, watch the [following video](https://www.youtube.com/watch?v=-vVskDsHcVc) to learn how to build the feature set with Python and NLTK. The source code in this video can be found [here](https://pythonprogramming.net/words-as-features-nltk-tutorial/)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "[![Building Features](building-features.jpg)](https://www.youtube.com/watch?v=-vVskDsHcVc)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\ljant\\anaconda3\\lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1416: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n" + ] + } + ], + "source": [ + "from sklearn.cluster import KMeans\n", + "kmeans = KMeans(n_clusters=2, random_state=0)\n", + "kmeans.fit(X)\n", + "pred = kmeans.predict(X)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
textclass
0@switchfoot http://twitpic.com/2y1zl - Awww, t...1
1is upset that he can't update his Facebook by ...0
2@Kenichan I dived many times for the ball. Man...0
3my whole body feels itchy and like its on fire0
4@nationwideclass no, it's not behaving at all....0
\n", + "
" + ], + "text/plain": [ + " text class\n", + "0 @switchfoot http://twitpic.com/2y1zl - Awww, t... 1\n", + "1 is upset that he can't update his Facebook by ... 0\n", + "2 @Kenichan I dived many times for the ball. Man... 0\n", + "3 my whole body feels itchy and like its on fire 0\n", + "4 @nationwideclass no, it's not behaving at all.... 0" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "predict_df = pd.concat([df['text'],pd.DataFrame(pred,columns=['class'])],axis=1)\n", + "predict_df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
textclass
1is upset that he can't update his Facebook by ...0
2@Kenichan I dived many times for the ball. Man...0
3my whole body feels itchy and like its on fire0
4@nationwideclass no, it's not behaving at all....0
5@Kwesidei not the whole crew0
.........
19994I am stuck in the city y is the holland tunnel...0
19995Not much time off this weekend, work trip to M...0
19997feeling so down right now .. i hate you DAMN H...0
19998geez,i hv to READ the whole book of personalit...0
19999I threw my sign at donnie and he bent over to ...0
\n", + "

18815 rows × 2 columns

\n", + "
" + ], + "text/plain": [ + " text class\n", + "1 is upset that he can't update his Facebook by ... 0\n", + "2 @Kenichan I dived many times for the ball. Man... 0\n", + "3 my whole body feels itchy and like its on fire 0\n", + "4 @nationwideclass no, it's not behaving at all.... 0\n", + "5 @Kwesidei not the whole crew 0\n", + "... ... ...\n", + "19994 I am stuck in the city y is the holland tunnel... 0\n", + "19995 Not much time off this weekend, work trip to M... 0\n", + "19997 feeling so down right now .. i hate you DAMN H... 0\n", + "19998 geez,i hv to READ the whole book of personalit... 0\n", + "19999 I threw my sign at donnie and he bent over to ... 0\n", + "\n", + "[18815 rows x 2 columns]" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "predict_df[predict_df['class'] ==0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# your code here" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Building and Traininng Naive Bayes Model\n", + "\n", + "In this step you will split your feature set into a training and a test set. Then you will create a Bayes classifier instance using `nltk.NaiveBayesClassifier.train` ([example](https://www.nltk.org/book/ch06.html)) to train with the training dataset.\n", + "\n", + "After training the model, call `classifier.show_most_informative_features()` to inspect the most important features. The output will look like:\n", + "\n", + "```\n", + "Most Informative Features\n", + "\t snow = True False : True = 34.3 : 1.0\n", + "\t easter = True False : True = 26.2 : 1.0\n", + "\t headach = True False : True = 20.9 : 1.0\n", + "\t argh = True False : True = 17.6 : 1.0\n", + "\tunfortun = True False : True = 16.9 : 1.0\n", + "\t jona = True True : False = 16.2 : 1.0\n", + "\t ach = True False : True = 14.9 : 1.0\n", + "\t sad = True False : True = 13.0 : 1.0\n", + "\t parent = True False : True = 12.9 : 1.0\n", + "\t spring = True False : True = 12.7 : 1.0\n", + "```\n", + "\n", + "The [following video](https://www.youtube.com/watch?v=rISOsUaTrO4) will help you complete this step. The source code in this video can be found [here](https://pythonprogramming.net/naive-bayes-classifier-nltk-tutorial/)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "[![Building and Training NB](nb-model-building.jpg)](https://www.youtube.com/watch?v=rISOsUaTrO4)" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# your code here" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Testing Naive Bayes Model\n", + "\n", + "Now we'll test our classifier with the test dataset. This is done by calling `nltk.classify.accuracy(classifier, test)`.\n", + "\n", + "As mentioned in one of the tutorial videos, a Naive Bayes model is considered OK if your accuracy score is over 0.6. If your accuracy score is over 0.7, you've done a great job!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# your code here" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Bonus Question 1: Improve Model Performance\n", + "\n", + "If you are still not exhausted so far and want to dig deeper, try to improve your classifier performance. There are many aspects you can dig into, for example:\n", + "\n", + "* Improve stemming and lemmatization. 
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Bonus Question 1: Improve Model Performance\n",
+ "\n",
+ "If you are still not exhausted so far and want to dig deeper, try to improve your classifier performance. There are many aspects you can dig into, for example:\n",
+ "\n",
+ "* Improve stemming and lemmatization. Inspect your bag of words and the most important features. Are there any words you should further remove from the analysis? You can append them to the stop words list.\n",
+ "\n",
+ "* Remember we only used the top 5,000 features to build the model? Try using different numbers of top features. The bottom line is to use as few features as you can without compromising your model performance. The fewer features you select into your model, the faster your model is trained. Then you can use a larger sample size to improve your model accuracy score."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# your code here"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Bonus Question 2: Machine Learning Pipeline\n",
+ "\n",
+ "In a new Jupyter Notebook, combine all your code into a function (or a class). Your new function will execute the complete machine learning pipeline by receiving the dataset location and outputting the classifier. This will allow you to use your function to predict the sentiment of any tweet in real time."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# your code here"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Bonus Question 3: Apache Spark\n",
+ "\n",
+ "If you have completed the Apache Spark advanced topic lab, you can migrate your pipeline from your local machine to a Databricks Notebook. Share your notebook with your instructor and classmates to show off your achievements!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# your code here"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/your-code/challenge-2.ipynb b/your-code/challenge-2.ipynb
deleted file mode 100644
index 6b0e116..0000000
--- a/your-code/challenge-2.ipynb
+++ /dev/null
@@ -1,320 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Challenge 2: Sentiment Analysis\n",
- "\n",
- "In this challenge we will learn sentiment analysis and practice performing sentiment analysis on Twitter tweets. \n",
- "\n",
- "## Introduction\n",
- "\n",
- "Sentiment analysis is to *systematically identify, extract, quantify, and study affective states and subjective information* based on texts ([reference](https://en.wikipedia.org/wiki/Sentiment_analysis)). In simple words, it's to understand whether a person is happy or unhappy in producing the piece of text. Why we (or rather, companies) care about sentiment in texts? It's because by understanding the sentiments in texts, we will be able to know if our customers are happy or unhappy about our products and services. If they are unhappy, the subsequent action is to figure out what have caused the unhappiness and make improvements.\n",
- "\n",
- "Basic sentiment analysis only understands the *positive* or *negative* (sometimes *neutral* too) polarities of the sentiment. 
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Bonus Question 2: Machine Learning Pipeline\n",
+ "\n",
+ "In a new Jupyter Notebook, combine all your code into a single function (or class). Your new function will execute the complete machine learning pipeline: it receives the dataset location and outputs the trained classifier. This will allow you to use your function to predict the sentiment of any tweet in real time. A sketch follows below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# your code here"
+ ]
+ },
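+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A sketch of how such a pipeline function might look. It assumes the Challenge 1 helpers (`clean_up`, `tokenize`, `stem_and_lemmatize`, `remove_stopwords`) and the `build_features` helper above are defined, and that the file follows the Sentiment140 layout (latin-1 encoded, no header row, target 4 = positive). The file path in the usage comment is illustrative only."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import nltk\n",
+ "\n",
+ "def train_sentiment_classifier(csv_path, sample_size=20000, n_top=5000):\n",
+ "    # Sentiment140 ships without a header row and in latin-1 encoding\n",
+ "    df = pd.read_csv(csv_path, encoding='latin-1', header=None)\n",
+ "    df.columns = ['target', 'id', 'date', 'flag', 'user', 'text']\n",
+ "    df = df.sample(sample_size, random_state=1)\n",
+ "\n",
+ "    # Reuse the Challenge 1 helpers to clean and tokenize the tweets\n",
+ "    df['text_processed'] = (df['text'].apply(clean_up).apply(tokenize)\n",
+ "                            .apply(stem_and_lemmatize).apply(remove_stopwords))\n",
+ "\n",
+ "    # Bag of words over the whole corpus, then the top n_top features\n",
+ "    all_words = nltk.FreqDist(w for tokens in df['text_processed'] for w in tokens)\n",
+ "    documents = list(zip(df['text_processed'], df['target'] == 4))\n",
+ "    feats = build_features(documents, all_words, n_top)\n",
+ "    return nltk.NaiveBayesClassifier.train(feats)\n",
+ "\n",
+ "# Example (path is an assumption):\n",
+ "# classifier = train_sentiment_classifier('training.1600000.processed.noemoticon.csv')"
+ ]
+ },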
", - "
", - "Our mission is to help people transform their careers and join a thriving community of tech professionals that love what they do.\"\n", - ">>> analyzer = SentimentIntensityAnalyzer()\n", - ">>> analyzer.polarity_scores(txt)\n", - "{'neg': 0.0, 'neu': 0.741, 'pos': 0.259, 'compound': 0.8442}\n", - "```\n", - "\n", - "In this challenge, however, you will not use NLTK's sentiment analysis package because in your Machine Learning training in the past 2 weeks you have learned how to make predictions more accurate than that. The [tweets data](https://www.kaggle.com/kazanova/sentiment140) we will be using today are already coded for the positive/negative sentiment. You will be able to use the Naïve Bayes classifier you learned in the lesson to predict the sentiment of tweets based on the labels." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conducting Sentiment Analysis\n", - "\n", - "### Loading and Exploring Data\n", - "\n", - "The dataset we'll be using today is located on Kaggle (https://www.kaggle.com/kazanova/sentiment140). Once you have downloaded and imported the dataset, it you will need to define the columns names: df.columns = ['target','id','date','flag','user','text']\n", - "\n", - "*Notes:* \n", - "\n", - "* The dataset is huuuuge (1.6m tweets). When you develop your data analysis codes, you can sample a subset of the data (e.g. 20k records) so that you will save a lot of time when you test your codes." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# your code here" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Prepare Textual Data for Sentiment Analysis\n", - "\n", - "Now, apply the functions you have written in Challenge 1 to your whole data set. These functions include:\n", - "\n", - "* `clean_up()`\n", - "\n", - "* `tokenize()`\n", - "\n", - "* `stem_and_lemmatize()`\n", - "\n", - "* `remove_stopwords()`\n", - "\n", - "Create a new column called `text_processed` in the dataframe to contain the processed data. At the end, your `text_processed` column should contain lists of word tokens that are cleaned up. Your data should look like below:\n", - "\n", - "![Processed Data](data-cleaning-results.png)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# your code here" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Creating Bag of Words\n", - "\n", - "The purpose of this step is to create a [bag of words](https://en.wikipedia.org/wiki/Bag-of-words_model) from the processed data. The bag of words contains all the unique words in your whole text body (a.k.a. *corpus*) with the number of occurrence of each word. It will allow you to understand which words are the most important features across the whole corpus.\n", - "\n", - "Also, you can imagine you will have a massive set of words. The less important words (i.e. those of very low number of occurrence) do not contribute much to the sentiment. Therefore, you only need to use the most important words to build your feature set in the next step. In our case, we will use the top 5,000 words with the highest frequency to build the features.\n", - "\n", - "In the cell below, combine all the words in `text_processed` and calculate the frequency distribution of all words. 
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# your code here"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Building Features\n",
- "\n",
- "Now let's build the features. Using the top 5,000 words, create a 2-dimensional matrix to record whether each of those words is contained in each document (tweet). Then you also have an output column to indicate whether the sentiment in each tweet is positive. For example, assuming your bag of words has 5 items (`['one', 'two', 'three', 'four', 'five']`) out of 4 documents (`['A', 'B', 'C', 'D']`), your feature set is essentially:\n",
- "\n",
- "| Doc | one | two | three | four | five | is_positive |\n",
- "|---|---|---|---|---|---|---|\n",
- "| A | True | False | False | True | False | True |\n",
- "| B | False | False | False | True | True | False |\n",
- "| C | False | True | False | False | False | True |\n",
- "| D | True | False | False | False | True | False |\n",
- "\n",
- "However, because the `nltk.NaiveBayesClassifier.train` method we will use in the next step does not work with Pandas dataframes, your feature set should be converted to a Python list looking like below:\n",
- "\n",
- "```python\n",
- "[\n",
- "\t({\n",
- "\t\t'one': True,\n",
- "\t\t'two': False,\n",
- "\t\t'three': False,\n",
- "\t\t'four': True,\n",
- "\t\t'five': False\n",
- "\t}, True),\n",
- "\t({\n",
- "\t\t'one': False,\n",
- "\t\t'two': False,\n",
- "\t\t'three': False,\n",
- "\t\t'four': True,\n",
- "\t\t'five': True\n",
- "\t}, False),\n",
- "\t({\n",
- "\t\t'one': False,\n",
- "\t\t'two': True,\n",
- "\t\t'three': False,\n",
- "\t\t'four': False,\n",
- "\t\t'five': False\n",
- "\t}, True),\n",
- "\t({\n",
- "\t\t'one': True,\n",
- "\t\t'two': False,\n",
- "\t\t'three': False,\n",
- "\t\t'four': False,\n",
- "\t\t'five': True\n",
- "\t}, False)\n",
- "]\n",
- "```\n",
- "\n",
- "To help you in this step, watch the [following video](https://www.youtube.com/watch?v=-vVskDsHcVc) to learn how to build the feature set with Python and NLTK. The source code in this video can be found [here](https://pythonprogramming.net/words-as-features-nltk-tutorial/)."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "[![Building Features](building-features.jpg)](https://www.youtube.com/watch?v=-vVskDsHcVc)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# your code here"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Building and Training Naive Bayes Model\n",
- "\n",
- "In this step you will split your feature set into a training and a test set. Then you will create a Bayes classifier instance using `nltk.NaiveBayesClassifier.train` ([example](https://www.nltk.org/book/ch06.html)) to train with the training dataset.\n",
- "\n",
- "After training the model, call `classifier.show_most_informative_features()` to inspect the most important features. The output will look like:\n",
- "\n",
- "```\n",
- "Most Informative Features\n",
- "\t snow = True False : True = 34.3 : 1.0\n",
- "\t easter = True False : True = 26.2 : 1.0\n",
- "\t headach = True False : True = 20.9 : 1.0\n",
- "\t argh = True False : True = 17.6 : 1.0\n",
- "\tunfortun = True False : True = 16.9 : 1.0\n",
- "\t jona = True True : False = 16.2 : 1.0\n",
- "\t ach = True False : True = 14.9 : 1.0\n",
- "\t sad = True False : True = 13.0 : 1.0\n",
- "\t parent = True False : True = 12.9 : 1.0\n",
- "\t spring = True False : True = 12.7 : 1.0\n",
- "```\n",
- "\n",
- "The [following video](https://www.youtube.com/watch?v=rISOsUaTrO4) will help you complete this step. The source code in this video can be found [here](https://pythonprogramming.net/naive-bayes-classifier-nltk-tutorial/)."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "[![Building and Training NB](nb-model-building.jpg)](https://www.youtube.com/watch?v=rISOsUaTrO4)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [],
- "source": [
- "# your code here"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Testing Naive Bayes Model\n",
- "\n",
- "Now we'll test our classifier with the test dataset. This is done by calling `nltk.classify.accuracy(classifier, test)`.\n",
- "\n",
- "As mentioned in one of the tutorial videos, a Naive Bayes model is considered OK if your accuracy score is over 0.6. If your accuracy score is over 0.7, you've done a great job!"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# your code here"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Bonus Question 1: Improve Model Performance\n",
- "\n",
- "If you are not exhausted yet and want to dig deeper, try to improve your classifier's performance. There are many aspects you can dig into, for example:\n",
- "\n",
- "* Improve stemming and lemmatization. Inspect your bag of words and the most important features. Are there any words you should further remove from the analysis? You can append such words to the stop words list.\n",
- "\n",
- "* Remember we only used the top 5,000 features to build the model? Try using different numbers of top features. The bottom line is to use as few features as you can without compromising your model's performance. The fewer features you select into your model, the faster your model is trained. Then you can use a larger sample size to improve your model's accuracy score."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# your code here"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Bonus Question 2: Machine Learning Pipeline\n",
- "\n",
- "In a new Jupyter Notebook, combine all your code into a single function (or class). Your new function will execute the complete machine learning pipeline: it receives the dataset location and outputs the trained classifier. This will allow you to use your function to predict the sentiment of any tweet in real time."
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# your code here" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Bonus Question 3: Apache Spark\n", - "\n", - "If you have completed the Apache Spark advanced topic lab, what you can do is to migrate your pipeline from local to a Databricks Notebook. Share your notebook with your instructor and classmates to show off your achievements!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# your code here" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -}