{ "cells": [ { "cell_type": "code", "execution_count": 2, "metadata": { "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", "execution": { "iopub.execute_input": "2023-02-28T09:01:35.199038Z", "iopub.status.busy": "2023-02-28T09:01:35.198153Z", "iopub.status.idle": "2023-02-28T09:01:35.214562Z", "shell.execute_reply": "2023-02-28T09:01:35.213234Z", "shell.execute_reply.started": "2023-02-28T09:01:35.198993Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv\n" ] } ], "source": [ "# This Python 3 environment comes with many helpful analytics libraries installed\n", "# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n", "# For example, here's several helpful packages to load\n", "\n", "import numpy as np # linear algebra\n", "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n", "\n", "# Input data files are available in the read-only \"../input/\" directory\n", "# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n", "\n", "import os\n", "for dirname, _, filenames in os.walk('/kaggle/input'):\n", " for filename in filenames:\n", " print(os.path.join(dirname, filename))\n", "\n", "# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n", "# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:01:37.498012Z", "iopub.status.busy": "2023-02-28T09:01:37.497070Z", "iopub.status.idle": "2023-02-28T09:01:37.502150Z", "shell.execute_reply": "2023-02-28T09:01:37.501011Z", "shell.execute_reply.started": "2023-02-28T09:01:37.497972Z" } }, 
"outputs": [], "source": [ "import re # Regular expression" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1. Data Accqasation\n", "This notebook will do basic IMDB reviews sentiment analysis. As show in below image, we will be performing few text cleaning and model building techniques. The flow of the notebook." ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:01:40.837603Z", "iopub.status.busy": "2023-02-28T09:01:40.836859Z", "iopub.status.idle": "2023-02-28T09:01:42.179878Z", "shell.execute_reply": "2023-02-28T09:01:42.178776Z", "shell.execute_reply.started": "2023-02-28T09:01:40.837558Z" } }, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
reviewsentiment
0One of the other reviewers has mentioned that ...positive
1A wonderful little production. <br /><br />The...positive
2I thought this was a wonderful way to spend ti...positive
3Basically there's a family where a little boy ...negative
4Petter Mattei's \"Love in the Time of Money\" is...positive
\n", "
" ], "text/plain": [ " review sentiment\n", "0 One of the other reviewers has mentioned that ... positive\n", "1 A wonderful little production.

The... positive\n", "2 I thought this was a wonderful way to spend ti... positive\n", "3 Basically there's a family where a little boy ... negative\n", "4 Petter Mattei's \"Love in the Time of Money\" is... positive" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df = pd.read_csv(\"/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv\")\n", "df.head()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1. Text Preprocessing\n", "- Lower casing\n", "- Remove HTML Tags\n", "- Remove Punctuations\n", "- Remove Stopwords\n", "- Steamming and Lemmatization\n", "- Observation" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:01:44.623338Z", "iopub.status.busy": "2023-02-28T09:01:44.622496Z", "iopub.status.idle": "2023-02-28T09:01:44.635713Z", "shell.execute_reply": "2023-02-28T09:01:44.634598Z", "shell.execute_reply.started": "2023-02-28T09:01:44.623294Z" } }, "outputs": [ { "data": { "text/plain": [ "0 One of the other reviewers has mentioned that ...\n", "1 A wonderful little production.

The...\n", "2 I thought this was a wonderful way to spend ti...\n", "3 Basically there's a family where a little boy ...\n", "4 Petter Mattei's \"Love in the Time of Money\" is...\n", " ... \n", "49995 I thought this movie did a down right good job...\n", "49996 Bad plot, bad dialogue, bad acting, idiotic di...\n", "49997 I am a Catholic taught in parochial elementary...\n", "49998 I'm going to have to disagree with the previou...\n", "49999 No one expects the Star Trek movies to be high...\n", "Name: review, Length: 50000, dtype: object" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df['review']" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - Lowercasing all the Data" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:01:46.978097Z", "iopub.status.busy": "2023-02-28T09:01:46.977348Z", "iopub.status.idle": "2023-02-28T09:01:47.121675Z", "shell.execute_reply": "2023-02-28T09:01:47.120539Z", "shell.execute_reply.started": "2023-02-28T09:01:46.978052Z" } }, "outputs": [], "source": [ "# Apply all the preprocessing techniques\n", "\n", "# Convert all the text to lowercase\n", "df['review'] = df['review'].str.lower()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - Removeing HTML tags from Data" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:01:49.177674Z", "iopub.status.busy": "2023-02-28T09:01:49.176927Z", "iopub.status.idle": "2023-02-28T09:01:49.395393Z", "shell.execute_reply": "2023-02-28T09:01:49.394319Z", "shell.execute_reply.started": "2023-02-28T09:01:49.177633Z" } }, "outputs": [], "source": [ "# Removing HTML Tags\n", "def remove_html_tags(text):\n", " clean = re.compile('<.*?>')\n", " return re.sub(clean, '', text)\n", "\n", "df['review'] = df['review'].apply(remove_html_tags)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ 
"### - Removing URLs from Texts" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:01:52.159195Z", "iopub.status.busy": "2023-02-28T09:01:52.158402Z", "iopub.status.idle": "2023-02-28T09:01:52.674649Z", "shell.execute_reply": "2023-02-28T09:01:52.673548Z", "shell.execute_reply.started": "2023-02-28T09:01:52.159151Z" } }, "outputs": [], "source": [ "# Removing URLs from Texts\n", "def remove_urls(text):\n", " clean = re.compile(r'http\\S+|www.\\S+')\n", " return re.sub(clean, '', text)\n", "\n", "df['review'] = df['review'].apply(remove_urls)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - Removing Punctuations from Data" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:01:54.689274Z", "iopub.status.busy": "2023-02-28T09:01:54.687886Z", "iopub.status.idle": "2023-02-28T09:01:55.752957Z", "shell.execute_reply": "2023-02-28T09:01:55.751887Z", "shell.execute_reply.started": "2023-02-28T09:01:54.689212Z" } }, "outputs": [], "source": [ "import string\n", "exclude = string.punctuation\n", "\n", "# Remove punctuation \n", "def remove_punc(text):\n", " return text.translate(str.maketrans('','',exclude))\n", "\n", "df['review'] = df['review'].apply(remove_punc) " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - Chart word treatments(short form sms_slangs)" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:01:58.088137Z", "iopub.status.busy": "2023-02-28T09:01:58.087588Z", "iopub.status.idle": "2023-02-28T09:01:58.102145Z", "shell.execute_reply": "2023-02-28T09:01:58.100570Z", "shell.execute_reply.started": "2023-02-28T09:01:58.088094Z" } }, "outputs": [], "source": [ "chart_words = {\"AFAIK\":\"As Far As I Know\",\n", "'AFK':'Away From Keyboard',\n", "'ASAP':'As Soon As Possible',\n", "'ATK':'At The Keyboard',\n", "'ATM':'At The 
Moment',\n", "'A3':'Anytime, Anywhere, Anyplace',\n", "'BAK':'Back At Keyboard',\n", "'BBL':'Be Back Later',\n", "'BBS':'Be Back Soon',\n", "'BFN':'Bye For Now',\n", "'B4N':'Bye For Now',\n", "'BRB':'Be Right Back',\n", "'BRT':'Be Right There',\n", "'BTW':'By The Way',\n", "'B4':'Before',\n", "'B4N':'Bye For Now',\n", "'CU':'See You',\n", "'CUL8R':'See You Later',\n", "'CYA':'See You',\n", "'FAQ':'Frequently Asked Questions',\n", "'FC':'Fingers Crossed',\n", "\"FWIW\":\"For What It's Worth\",\n", "\"FYI\":\"For Your Information\",\n", "\"GAL\":\"Get A Life\",\n", "\"GG\":\"Good Game\",\n", "\"GN\":\"Good Night\",\n", "\"GMTA\":\"Great Minds Think Alike\",\n", "\"GR8\":\"Great\",\n", "\"G9\":\"Genius\",\n", "\"IC\":\"I See\",\n", "\"ICQ\":\"I Seek you\",\n", "\"ILU\":\"I Love You\",\n", "\"IMHO\":\"In My Honest/Humble Opinion\",\n", "\"IMO\":\"In My Opinion\",\n", "\"IOW\":\"In Other Words\",\n", "\"IRL\":\"In Real Life\",\n", "\"KISS\":\"Keep It Simple Stupid\",\n", "\"LDR\":\"Long Distance Relationship\",\n", "\"LMAO\":\"Laugh My A Off\",\n", "\"LOL\":\"Laughing Out Loud\",\n", "\"LTNS\":\"Long Time No See\",\n", "\"L8R\":\"Later\",\n", "\"MTE\":\"My Thoughts Exactly\",\n", "\"M8\":\"Mate\",\n", "\"NRN\":\"No Reply Necessary\",\n", "\"OIC\":\"Oh I See\",\n", "\"PITA\":\"Pain In The A\",\n", "\"PRT\":\"Party\",\n", "\"PRW\":\"Parents Are Watching\",\n", "\"QPSA\":\"Que Pasa?\",\n", "\"ROFL\":\"Rolling On The Floor Laughing\",\n", "\"ROFLOL\":\"Rolling On The Floor Laughing Out Loud\",\n", "\"ROTFLMAO\":\"Rolling On The Floor Laughing My A Off\",\n", "\"SK8\":\"Skate\",\n", "\"STATS\":\"Your sex and age\",\n", "\"ASL\":\"Age, Sex, Location\",\n", "\"THX\":\"Thank You\",\n", "\"TTFN\":\"Ta-Ta For Now\",\n", "\"TTYL\":\"Talk To You Later\",\n", "\"U\":\"You\",\n", "\"U2\":\"You Too\",\n", "\"U4E\":\"Yours For Ever\",\n", "\"WB\":\"Welcome Back\",\n", "\"WTF\":\"What The F\",\n", "\"WTG\":\"Way To Go\",\n", "\"WUF\":\"Where Are You From\",\n", "'W8':'Wait',\n", 
"'7K':'Sick D Laugher'}" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:01:59.138690Z", "iopub.status.busy": "2023-02-28T09:01:59.138079Z", "iopub.status.idle": "2023-02-28T09:01:59.148390Z", "shell.execute_reply": "2023-02-28T09:01:59.147289Z", "shell.execute_reply.started": "2023-02-28T09:01:59.138632Z" } }, "outputs": [], "source": [ "def chart_conversations(text):\n", " new_text = []\n", " for w in text.split():\n", " if w.upper() in chart_words:\n", " new_text.append(chart_words[w.upper()])\n", " else:\n", " new_text.append(w)\n", " return \" \".join(new_text)" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:02:01.222894Z", "iopub.status.busy": "2023-02-28T09:02:01.221976Z", "iopub.status.idle": "2023-02-28T09:02:01.230050Z", "shell.execute_reply": "2023-02-28T09:02:01.228866Z", "shell.execute_reply.started": "2023-02-28T09:02:01.222848Z" } }, "outputs": [ { "data": { "text/plain": [ "'Hello , Where Are You From , See You Later , Frequently Asked Questions , Be Back Later'" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "nm = \"Hello , WUF , CUL8R , FAQ , BBL\"\n", "chart_conversations(nm)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - Spelling Correction:(spcy, textbolb, pyspellchecker)" ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:02:05.938121Z", "iopub.status.busy": "2023-02-28T09:02:05.936910Z", "iopub.status.idle": "2023-02-28T09:02:06.457439Z", "shell.execute_reply": "2023-02-28T09:02:06.456315Z", "shell.execute_reply.started": "2023-02-28T09:02:05.938064Z" } }, "outputs": [], "source": [ "from textblob import TextBlob" ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:02:08.338208Z", "iopub.status.busy": 
"2023-02-28T09:02:08.337106Z", "iopub.status.idle": "2023-02-28T09:02:09.128963Z", "shell.execute_reply": "2023-02-28T09:02:09.127873Z", "shell.execute_reply.started": "2023-02-28T09:02:08.338165Z" } }, "outputs": [ { "data": { "text/plain": [ "'certain conditions several generation ,read the notebook and also like notebook'" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "incorrect_text = \"certain conditions several ggenaeration ,read the notebook and alos like notboook\"\n", "\n", "# Spelling correction by Textblob\n", "textBlob = TextBlob(incorrect_text)\n", "textBlob.correct().string" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - Removing Stopwords\n", "for POS - tagging we don't use stopword to remove is,am,are,was,and,but..." ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:02:12.057700Z", "iopub.status.busy": "2023-02-28T09:02:12.057305Z", "iopub.status.idle": "2023-02-28T09:02:12.070805Z", "shell.execute_reply": "2023-02-28T09:02:12.068811Z", "shell.execute_reply.started": "2023-02-28T09:02:12.057663Z" } }, "outputs": [ { "data": { "text/plain": [ "\"nltk.download('stopwords')\"" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Import the library and download the stop words:\n", "from nltk.corpus import stopwords\n", "stp = stopwords.words('english')\n", "\n", "# Other method if nltk stop words not present\n", "'''nltk.download('stopwords')'''" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:02:14.667306Z", "iopub.status.busy": "2023-02-28T09:02:14.666909Z", "iopub.status.idle": "2023-02-28T09:02:14.673664Z", "shell.execute_reply": "2023-02-28T09:02:14.672527Z", "shell.execute_reply.started": "2023-02-28T09:02:14.667267Z" } }, "outputs": [], "source": [ "# Define a function to remove stop words from the 
text:\n", "def remove_stopwords(text):\n", " new_text = []\n", " \n", " for word in text.split():\n", " if word in stp: # stp = stopwords.words('english')\n", " new_text.append('')\n", " else:\n", " new_text.append(word)\n", " \n", " x = new_text[:]\n", " new_text.clear()\n", " return \" \".join(x)" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:02:16.297289Z", "iopub.status.busy": "2023-02-28T09:02:16.296887Z", "iopub.status.idle": "2023-02-28T09:02:39.152572Z", "shell.execute_reply": "2023-02-28T09:02:39.151496Z", "shell.execute_reply.started": "2023-02-28T09:02:16.297251Z" } }, "outputs": [], "source": [ "df['review'] = df['review'].apply(remove_stopwords)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - Handling Emoji\n", "- **Replace with meaning** -\n", "We can remove all emojis from the text using regular expressions\n", "- **Remove** -\n", "We can replace emojis with a text representation." ] }, { "cell_type": "code", "execution_count": 18, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:02:43.119351Z", "iopub.status.busy": "2023-02-28T09:02:43.118942Z", "iopub.status.idle": "2023-02-28T09:02:43.125800Z", "shell.execute_reply": "2023-02-28T09:02:43.124522Z", "shell.execute_reply.started": "2023-02-28T09:02:43.119314Z" } }, "outputs": [], "source": [ "# Remove by usning Regular expression\n", "def remove_emoji(text):\n", " emoji_pattern = re.compile(\"[\"\n", " u\"\\U0001F600-\\U0001F64F\" # emoticons\n", " u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n", " u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n", " u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n", " \"]+\", flags=re.UNICODE)\n", " return emoji_pattern.sub(r'', text)" ] }, { "cell_type": "code", "execution_count": 19, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:02:45.558806Z", "iopub.status.busy": "2023-02-28T09:02:45.557737Z", "iopub.status.idle": 
"2023-02-28T09:02:45.568747Z", "shell.execute_reply": "2023-02-28T09:02:45.567353Z", "shell.execute_reply.started": "2023-02-28T09:02:45.558761Z" } }, "outputs": [ { "data": { "text/plain": [ "'hello, world ,, '" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "emoji_text = \"hello, world 😀,😃,😄 😈 😀\"\n", "remove_emoji(emoji_text)" ] }, { "cell_type": "code", "execution_count": 20, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:02:48.839342Z", "iopub.status.busy": "2023-02-28T09:02:48.838411Z", "iopub.status.idle": "2023-02-28T09:02:59.528188Z", "shell.execute_reply": "2023-02-28T09:02:59.526961Z", "shell.execute_reply.started": "2023-02-28T09:02:48.839294Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: emoji in /opt/conda/lib/python3.7/site-packages (2.2.0)\n", "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n", "\u001b[0m" ] } ], "source": [ "!pip install emoji" ] }, { "cell_type": "code", "execution_count": 21, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:03:01.798077Z", "iopub.status.busy": "2023-02-28T09:03:01.797654Z", "iopub.status.idle": "2023-02-28T09:03:01.833902Z", "shell.execute_reply": "2023-02-28T09:03:01.832698Z", "shell.execute_reply.started": "2023-02-28T09:03:01.798028Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "hello, world :grinning_face:,:grinning_face_with_big_eyes:,:grinning_face_with_smiling_eyes: :smiling_face_with_horns: :grinning_face:\n" ] } ], "source": [ "# Replacing emoji to text\n", "import emoji\n", "\n", "print(emoji.demojize(\"hello, world 😀,😃,😄 😈 😀\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - Tokenizations :\n", "**- Word Tokenization**\n", "\n", "**- Sentence Tokenization**\n", "\n", "**White space tokenization:** This method splits a text into tokens based on the white space characters (e.g., spaces, tabs, newlines) between them. This is the simplest form of tokenization and can be implemented using the split() function in Python.\n", "\n", "**Punctuation-based tokenization:** This method splits a text into tokens based on the punctuation marks between them. This method is more complex than white space tokenization and can be implemented using regular expressions or the nltk library in Python.\n", "\n", "**Word-based tokenization:** This method splits a text into tokens based on the words between them. This method is more complex than white space tokenization and requires a language model that can identify the boundaries between words. The nltk library provides several pre-trained models for word-based tokenization, including the punkt model." 
] }, { "cell_type": "code", "execution_count": 22, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:03:07.908166Z", "iopub.status.busy": "2023-02-28T09:03:07.907155Z", "iopub.status.idle": "2023-02-28T09:03:07.913903Z", "shell.execute_reply": "2023-02-28T09:03:07.912592Z", "shell.execute_reply.started": "2023-02-28T09:03:07.908110Z" } }, "outputs": [], "source": [ "sent_1 = \"This method splits by sentences. This tokenization implemented\"\n", "sent_2 = \"This method splits by word.This tokenization implemented\"\n", "\n", "# By Using NLTK\n", "from nltk.tokenize import word_tokenize ,sent_tokenize" ] }, { "cell_type": "code", "execution_count": 23, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:03:10.058110Z", "iopub.status.busy": "2023-02-28T09:03:10.057093Z", "iopub.status.idle": "2023-02-28T09:03:10.079265Z", "shell.execute_reply": "2023-02-28T09:03:10.078026Z", "shell.execute_reply.started": "2023-02-28T09:03:10.058047Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "sentence_tokenize - ['This method splits by sentences.', 'This tokenization implemented']\n", "word_tokenize - ['This', 'method', 'splits', 'by', 'word.This', 'tokenization', 'implemented']\n" ] } ], "source": [ "print('sentence_tokenize -',sent_tokenize(sent_1))\n", "print('word_tokenize -',word_tokenize(sent_2))" ] }, { "cell_type": "code", "execution_count": 24, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:03:12.943338Z", "iopub.status.busy": "2023-02-28T09:03:12.942087Z", "iopub.status.idle": "2023-02-28T09:03:36.465661Z", "shell.execute_reply": "2023-02-28T09:03:36.464382Z", "shell.execute_reply.started": "2023-02-28T09:03:12.943284Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "sent_1 tokenize [This, method, splits, by, sentences, ., This, tokenization, implemented]\n", "sent_2 tokenize [This, method, splits, by, word, ., This, tokenization, implemented]\n" ] } ], "source": [ 
"# By Using Spacy\n", "import spacy\n", "nlp = spacy.load('en_core_web_sm')\n", "\n", "doc1 = nlp(sent_1)\n", "doc2 = nlp(sent_2)\n", "\n", "sent1 = []\n", "sent2 = []\n", "for token in doc1:\n", " sent1.append(token)\n", "for token in doc2:\n", " sent2.append(token)\n", "print('sent_1 tokenize',sent1)\n", "print('sent_2 tokenize',sent2)" ] }, { "cell_type": "code", "execution_count": 25, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:03:36.468767Z", "iopub.status.busy": "2023-02-28T09:03:36.467757Z", "iopub.status.idle": "2023-02-28T09:04:05.529030Z", "shell.execute_reply": "2023-02-28T09:04:05.527967Z", "shell.execute_reply.started": "2023-02-28T09:03:36.468723Z" } }, "outputs": [], "source": [ "# Apply nltk word_tokenize in imdb data\n", "from nltk.tokenize import word_tokenize\n", "def wrd_token(text):\n", " return word_tokenize(text)\n", "df['review'] = df['review'].apply(wrd_token)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - Stemming :(It is slow in processing)\n" ] }, { "cell_type": "code", "execution_count": 28, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:04:35.737577Z", "iopub.status.busy": "2023-02-28T09:04:35.736859Z", "iopub.status.idle": "2023-02-28T09:04:35.743182Z", "shell.execute_reply": "2023-02-28T09:04:35.741453Z", "shell.execute_reply.started": "2023-02-28T09:04:35.737534Z" } }, "outputs": [], "source": [ "from nltk.stem.porter import PorterStemmer\n", "ps = PorterStemmer()" ] }, { "cell_type": "code", "execution_count": 29, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:04:46.312604Z", "iopub.status.busy": "2023-02-28T09:04:46.311973Z", "iopub.status.idle": "2023-02-28T09:07:17.076444Z", "shell.execute_reply": "2023-02-28T09:07:17.075197Z", "shell.execute_reply.started": "2023-02-28T09:04:46.312561Z" } }, "outputs": [ { "data": { "text/plain": [ "0 one review mention watch 1 oz episod youll hoo...\n", "1 wonder littl product film techniqu unassum old...\n", 
"2 thought wonder way spend time hot summer weeke...\n", "3 basic there famili littl boy jake think there ...\n", "4 petter mattei love time money visual stun film...\n", " ... \n", "49995 thought movi right good job wasnt creativ orig...\n", "49996 bad plot bad dialogu bad act idiot direct anno...\n", "49997 cathol taught parochi elementari school nun ta...\n", "49998 im go disagre previou comment side maltin one ...\n", "49999 one expect star trek movi high art fan expect ...\n", "Name: review, Length: 50000, dtype: object" ] }, "execution_count": 29, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Function for applying stemming function\n", "def stem_words(text):\n", " return \" \".join([ps.stem(word) for word in text])\n", "\n", "df['review'].apply(stem_words)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - Lemmatization :\n" ] }, { "cell_type": "code", "execution_count": 35, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:12:05.953498Z", "iopub.status.busy": "2023-02-28T09:12:05.952776Z", "iopub.status.idle": "2023-02-28T09:12:12.682133Z", "shell.execute_reply": "2023-02-28T09:12:12.679291Z", "shell.execute_reply.started": "2023-02-28T09:12:05.953435Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "NLTK Downloader\n", "---------------------------------------------------------------------------\n", " d) Download l) List u) Update c) Config h) Help q) Quit\n", "---------------------------------------------------------------------------\n" ] }, { "ename": "KeyboardInterrupt", "evalue": "Interrupted by user", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", "\u001b[0;32m/tmp/ipykernel_308/4212150469.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mimport\u001b[0m 
\u001b[0mnltk\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mnltk\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstem\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mWordNetLemmatizer\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mnltk\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdownload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4\u001b[0m \u001b[0mlemmatizer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mWordNetLemmatizer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/conda/lib/python3.7/site-packages/nltk/downloader.py\u001b[0m in \u001b[0;36mdownload\u001b[0;34m(self, info_or_id, download_dir, quiet, force, prefix, halt_on_error, raise_on_error)\u001b[0m\n\u001b[1;32m 659\u001b[0m \u001b[0;31m# function should make a new copy of self to use?\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 660\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mdownload_dir\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_download_dir\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdownload_dir\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 661\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_interactive_download\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 662\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 663\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", 
"\u001b[0;32m/opt/conda/lib/python3.7/site-packages/nltk/downloader.py\u001b[0m in \u001b[0;36m_interactive_download\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 982\u001b[0m \u001b[0mDownloaderGUI\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmainloop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 983\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mTclError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 984\u001b[0;31m \u001b[0mDownloaderShell\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 985\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 986\u001b[0m \u001b[0mDownloaderShell\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/conda/lib/python3.7/site-packages/nltk/downloader.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1004\u001b[0m self._simple_interactive_menu(\n\u001b[1;32m 1005\u001b[0m 'd) Download', 'l) List', ' u) Update', 'c) Config', 'h) Help', 'q) Quit')\n\u001b[0;32m-> 1006\u001b[0;31m \u001b[0muser_input\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Downloader> '\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstrip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1007\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0muser_input\u001b[0m\u001b[0;34m:\u001b[0m 
\u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m;\u001b[0m \u001b[0;32mcontinue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1008\u001b[0m \u001b[0mcommand\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0muser_input\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlower\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/conda/lib/python3.7/site-packages/ipykernel/kernelbase.py\u001b[0m in \u001b[0;36mraw_input\u001b[0;34m(self, prompt)\u001b[0m\n\u001b[1;32m 1179\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent_ident\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"shell\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1180\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_parent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"shell\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1181\u001b[0;31m \u001b[0mpassword\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1182\u001b[0m )\n\u001b[1;32m 1183\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/conda/lib/python3.7/site-packages/ipykernel/kernelbase.py\u001b[0m in \u001b[0;36m_input_request\u001b[0;34m(self, prompt, ident, parent, password)\u001b[0m\n\u001b[1;32m 1217\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mKeyboardInterrupt\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1218\u001b[0m \u001b[0;31m# re-raise KeyboardInterrupt, to truncate 
traceback\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1219\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mKeyboardInterrupt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Interrupted by user\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1220\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1221\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlog\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwarning\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Invalid Message:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexc_info\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mKeyboardInterrupt\u001b[0m: Interrupted by user" ] } ], "source": [ "import nltk\n", "from nltk.stem import WordNetLemmatizer\n", "nltk.download() \n", "lemmatizer = WordNetLemmatizer()\n", "\n", "def lemma_words(text):\n", " return \" \".join([lemmatizer.lemmatize(word) for word in text])" ] }, { "cell_type": "code", "execution_count": 36, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:12:39.848502Z", "iopub.status.busy": "2023-02-28T09:12:39.847845Z", "iopub.status.idle": "2023-02-28T09:15:17.653995Z", "shell.execute_reply": "2023-02-28T09:15:17.652811Z", "shell.execute_reply.started": "2023-02-28T09:12:39.848457Z" } }, "outputs": [], "source": [ "df['lemma_review'] = df['review'].apply(stem_words)" ] }, { "cell_type": "code", "execution_count": 32, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:09:25.548239Z", "iopub.status.busy": "2023-02-28T09:09:25.547782Z", "iopub.status.idle": "2023-02-28T09:09:25.595610Z", "shell.execute_reply": "2023-02-28T09:09:25.594356Z", "shell.execute_reply.started": 
"2023-02-28T09:09:25.548199Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "No. of word in corpus -  599977\n", "No. of unique word in corpus -  55158\n" ] } ], "source": [ "# Total number of words in corpus and number of unique word.\n", "merge_list = []\n", "for row in df['review'][0:5000]:\n", "    merge_list.extend(row)\n", "print('No. of word in corpus - ',len(merge_list))\n", "print('No. of unique word in corpus - ',len(set(merge_list)))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2. Text Representations Or Text Vectorization:\n", "\n", "### - Bag of Words (Text classification)\n", "\n" ] }, { "cell_type": "code", "execution_count": 30, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T08:59:46.519000Z", "iopub.status.busy": "2023-02-28T08:59:46.516506Z", "iopub.status.idle": "2023-02-28T08:59:51.295200Z", "shell.execute_reply": "2023-02-28T08:59:51.294131Z", "shell.execute_reply.started": "2023-02-28T08:59:46.518957Z" } }, "outputs": [], "source": [ "from sklearn.feature_extraction.text import CountVectorizer\n", "cv = CountVectorizer()\n", "bow = cv.fit_transform(df['lemma_review'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T08:59:51.301302Z", "iopub.status.busy": "2023-02-28T08:59:51.300748Z" } }, "outputs": [], "source": [ "bow.toarray()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - N-Grams or Bag of Ngrams\n", "\n", "Dimensionality increases with larger n-grams; this slows down the algorithm, and out-of-vocabulary n-grams are ignored."
] }, { "cell_type": "code", "execution_count": 1, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:00:17.147997Z", "iopub.status.busy": "2023-02-28T09:00:17.147340Z", "iopub.status.idle": "2023-02-28T09:00:18.009694Z", "shell.execute_reply": "2023-02-28T09:00:18.008034Z", "shell.execute_reply.started": "2023-02-28T09:00:17.147955Z" } }, "outputs": [ { "ename": "NameError", "evalue": "name 'df' is not defined", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m/tmp/ipykernel_308/285669394.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0msklearn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfeature_extraction\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtext\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mCountVectorizer\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0mcv_1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mCountVectorizer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mngram_range\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mbow2\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcv_1\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit_transform\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'lemma_review'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;31mNameError\u001b[0m: name 'df' is not defined" ] } ], "source": [ "from sklearn.feature_extraction.text import CountVectorizer\n", "cv_1 = CountVectorizer(ngram_range=(10,10))\n", "bow2 = cv_1.fit_transform(df['lemma_review'])" ] }, { 
"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "bow2.toarray()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### - Tf - idf (Term frequency and Inverse Document frequency)\n", "\n", "TF-IDF vectors are sparse, out-of-vocabulary words are ignored, the dimensionality is large when the vocabulary is large, and semantic relationships are not captured." ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:17:00.313046Z", "iopub.status.busy": "2023-02-28T09:17:00.312563Z", "iopub.status.idle": "2023-02-28T09:17:01.216547Z", "shell.execute_reply": "2023-02-28T09:17:01.214914Z", "shell.execute_reply.started": "2023-02-28T09:17:00.313002Z" } }, "outputs": [ { "ename": "NameError", "evalue": "name 'df' is not defined", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)", "\u001b[0;32m/tmp/ipykernel_1018/587138577.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0msklearn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfeature_extraction\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtext\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mTfidfVectorizer\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      2\u001b[0m \u001b[0mtfidf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mTfidfVectorizer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mtf_idf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtfidf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit_transform\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'lemma_review'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;31mNameError\u001b[0m: name 
'df' is not defined" ] } ], "source": [ "from sklearn.feature_extraction.text import TfidfVectorizer\n", "tfidf = TfidfVectorizer()\n", "tf_idf = tfidf.fit_transform(df['lemma_review'])" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "execution": { "iopub.execute_input": "2023-02-28T09:16:54.789060Z", "iopub.status.busy": "2023-02-28T09:16:54.787898Z", "iopub.status.idle": "2023-02-28T09:16:54.868566Z", "shell.execute_reply": "2023-02-28T09:16:54.866662Z", "shell.execute_reply.started": "2023-02-28T09:16:54.789007Z" } }, "outputs": [ { "ename": "NameError", "evalue": "name 'tf_idf' is not defined", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m/tmp/ipykernel_1018/1279619201.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtf_idf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtoarray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;31mNameError\u001b[0m: name 'tf_idf' is not defined" ] } ], "source": [ "tf_idf.toarray()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "df['review'][0]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", 
"version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 4 }