170 changes: 127 additions & 43 deletions your-code/challenge-1.ipynb → your-code/[challenge-1]Filipa T.ipynb
@@ -66,20 +66,42 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import nltk  # Natural Language Toolkit -- this package is quite a mess; it was poorly designed and the documentation is not great\n",
"from nltk.stem import WordNetLemmatizer\n",
"from nltk.corpus import stopwords\n",
"from sklearn.feature_extraction.text import CountVectorizer\n",
"from sklearn.feature_extraction.text import TfidfVectorizer"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Ironhack's Q website is\n"
]
}
],
"source": [
"import re\n",
"def clean_up(s):\n",
"    \"\"\"\n",
"    Cleans up numbers, URLs, and special characters from a string.\n",
"\n",
"    Args:\n",
"        s: The string to be cleaned up.\n",
"\n",
"    Returns:\n",
"        A string that has been cleaned up.\n",
"    \"\"\"\n",
"    s = re.sub(r'http\\S+', '', s)\n",
"    s = re.sub(r'[^a-zA-Z\\s\\']', ' ', s)\n",
"    s = re.sub(r'\\s+', ' ', s)\n",
"    return s.strip()\n",
"\n",
"string = \"@Ironhack's-#Q website 776-is http://ironhack.com [(2018)]\"\n",
"clean_string = clean_up(string)\n",
"print(clean_string)"
]
},
{
@@ -101,20 +123,40 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 18,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package punkt to\n",
"[nltk_data] C:\\Users\\ljant\\AppData\\Roaming\\nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"data": {
"text/plain": [
"['Ironhack', 's', 'Q', 'website', 'is']"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from nltk.tokenize import word_tokenize\n",
"nltk.download('punkt')\n",
"\n",
"def tokenize(s):\n",
"    \"\"\"\n",
"    Tokenize a string.\n",
"\n",
"    Args:\n",
"        s: String to be tokenized.\n",
"\n",
"    Returns:\n",
"        A list of words as the result of tokenization.\n",
"    \"\"\"\n",
"    tokens = word_tokenize(s)\n",
"    # Strip the stray apostrophes that word_tokenize leaves on contractions and possessives\n",
"    tokens = [token.replace(\"'\", \"\") for token in tokens]\n",
"    return tokens\n",
"\n",
"tokens = tokenize(clean_string)\n",
"tokens"
]
},
{
@@ -145,20 +187,43 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"execution_count": 22,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package wordnet to\n",
"[nltk_data] C:\\Users\\ljant\\AppData\\Roaming\\nltk_data...\n",
"[nltk_data] Package wordnet is already up-to-date!\n"
]
},
{
"data": {
"text/plain": [
"['Ironhack', 's', 'Q', 'website', 'is']"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"nltk.download('wordnet')\n",
"from nltk.stem import WordNetLemmatizer\n",
"from nltk.corpus import wordnet\n",
"\n",
"def stem_and_lemmatize(l):\n",
"    \"\"\"\n",
"    Perform stemming and lemmatization on a list of words.\n",
"\n",
"    Args:\n",
"        l: A list of strings.\n",
"\n",
"    Returns:\n",
"        A list of strings after being stemmed and lemmatized.\n",
"    \"\"\"\n",
"    # Note: despite the name, this only lemmatizes; no stemmer is applied.\n",
"    lemmatizer = WordNetLemmatizer()\n",
"    lemmatized = [lemmatizer.lemmatize(word) for word in l]\n",
"    return lemmatized\n",
"\n",
"string = stem_and_lemmatize(tokens)\n",
"string"
]
},
{
@@ -176,20 +241,39 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 23,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package stopwords to\n",
"[nltk_data] C:\\Users\\ljant\\AppData\\Roaming\\nltk_data...\n",
"[nltk_data] Package stopwords is already up-to-date!\n"
]
},
{
"data": {
"text/plain": [
"['Ironhack', 'Q', 'website']"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from nltk.corpus import stopwords\n",
"nltk.download('stopwords')\n",
"\n",
"def remove_stopwords(l):\n",
"    \"\"\"\n",
"    Remove English stopwords from a list of strings.\n",
"\n",
"    Args:\n",
"        l: A list of strings.\n",
"\n",
"    Returns:\n",
"        A list of strings after stop words are removed.\n",
"    \"\"\"\n",
"    # Restrict the lookup to the English stop word list, as the docstring describes.\n",
"    clean = [word for word in l if word not in stopwords.words('english')]\n",
"    return clean\n",
"\n",
"string_clean = remove_stopwords(string)\n",
"string_clean"
]
},
{
@@ -204,7 +288,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -218,7 +302,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
"version": "3.9.7"
}
},
"nbformat": 4,