Monday 6 November 2023

Diwali Sales Analysis

 

# import python libraries

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt # visualizing data
# %matplotlib inline
import seaborn as sns

# mount Google Drive (Colab only) so the CSV path below is accessible
from google.colab import drive
drive.mount('/content/drive')

# import csv file
df = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Diwali Sales Data.csv', encoding='unicode_escape')

df.shape

df.head()

df.info()

#drop unrelated/blank columns
df.drop(['Status', 'unnamed1'], axis=1, inplace=True)

#check for null values
pd.isnull(df).sum()

# drop null values
df.dropna(inplace=True)

# change data type
df['Amount'] = df['Amount'].astype('int')

df['Amount'].dtypes

df.columns

# rename column (rename() returns a new DataFrame; use inplace=True or reassign to keep the change)
df.rename(columns={'Marital_Status': 'Shaadi'})

# describe() method returns description of the data in the DataFrame (i.e. count, mean, std, etc)
df.describe()

# use describe() for specific columns
df[['Age', 'Orders', 'Amount']].describe()

"""# Exploratory Data Analysis

### Gender
"""

# plotting a bar chart for Gender and its count

ax = sns.countplot(x = 'Gender',data = df)

for bars in ax.containers:
    ax.bar_label(bars)

# plotting a bar chart for gender vs total amount

sales_gen = df.groupby(['Gender'], as_index=False)['Amount'].sum().sort_values(by='Amount', ascending=False)

sns.barplot(x = 'Gender',y= 'Amount' ,data = sales_gen)

"""*From above graphs we can see that most of the buyers are females and even the purchasing power of females are greater than men*

### Age
"""

ax = sns.countplot(data = df, x = 'Age Group', hue = 'Gender')

for bars in ax.containers:
    ax.bar_label(bars)

# Total Amount vs Age Group
sales_age = df.groupby(['Age Group'], as_index=False)['Amount'].sum().sort_values(by='Amount', ascending=False)

sns.barplot(x = 'Age Group',y= 'Amount' ,data = sales_age)

"""*From above graphs we can see that most of the buyers are of age group between 26-35 yrs female*

### State
"""

# total number of orders from top 10 states

sales_state = df.groupby(['State'], as_index=False)['Orders'].sum().sort_values(by='Orders', ascending=False).head(10)

sns.set(rc={'figure.figsize':(15,5)})
sns.barplot(data = sales_state, x = 'State',y= 'Orders')

# total amount/sales from top 10 states

sales_state = df.groupby(['State'], as_index=False)['Amount'].sum().sort_values(by='Amount', ascending=False).head(10)

sns.set(rc={'figure.figsize':(15,5)})
sns.barplot(data = sales_state, x = 'State',y= 'Amount')

"""*From above graphs we can see that most of the orders & total sales/amount are from Uttar Pradesh, Maharashtra and Karnataka respectively*

### Marital Status
"""

ax = sns.countplot(data = df, x = 'Marital_Status')

sns.set(rc={'figure.figsize':(7,5)})
for bars in ax.containers:
    ax.bar_label(bars)

sales_state = df.groupby(['Marital_Status', 'Gender'], as_index=False)['Amount'].sum().sort_values(by='Amount', ascending=False)

sns.set(rc={'figure.figsize':(6,5)})
sns.barplot(data = sales_state, x = 'Marital_Status',y= 'Amount', hue='Gender')

"""*From above graphs we can see that most of the buyers are married (women) and they have high purchasing power*

### Occupation
"""

sns.set(rc={'figure.figsize':(20,5)})
ax = sns.countplot(data = df, x = 'Occupation')

for bars in ax.containers:
    ax.bar_label(bars)

sales_state = df.groupby(['Occupation'], as_index=False)['Amount'].sum().sort_values(by='Amount', ascending=False)

sns.set(rc={'figure.figsize':(20,5)})
sns.barplot(data = sales_state, x = 'Occupation',y= 'Amount')

"""*From above graphs we can see that most of the buyers are working in IT, Healthcare and Aviation sector*

### Product Category
"""

sns.set(rc={'figure.figsize':(20,5)})
ax = sns.countplot(data = df, x = 'Product_Category')

for bars in ax.containers:
    ax.bar_label(bars)

sales_state = df.groupby(['Product_Category'], as_index=False)['Amount'].sum().sort_values(by='Amount', ascending=False).head(10)

sns.set(rc={'figure.figsize':(20,5)})
sns.barplot(data = sales_state, x = 'Product_Category',y= 'Amount')

"""*From above graphs we can see that most of the sold products are from Food, Clothing and Electronics category*"""

sales_state = df.groupby(['Product_ID'], as_index=False)['Orders'].sum().sort_values(by='Orders', ascending=False).head(10)

sns.set(rc={'figure.figsize':(20,5)})
sns.barplot(data = sales_state, x = 'Product_ID',y= 'Orders')

# top 10 most sold products (same result as above)

fig1, ax1 = plt.subplots(figsize=(12,7))
df.groupby('Product_ID')['Orders'].sum().nlargest(10).plot(kind='bar')  # nlargest already sorts in descending order

"""## Conclusion:

###

*Married women age group 26-35 yrs from UP,  Maharastra and Karnataka working in IT, Healthcare and Aviation are more likely to buy products from Food, Clothing and Electronics category*
"""




Link for Google Colaboratory: Click Here

House Price Prediction

# -*- coding: utf-8 -*-
"""
Importing the Dependencies
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.datasets
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
from sklearn import metrics

"""Importing the Boston House Price Dataset"""

house_price_dataset = sklearn.datasets.load_boston()

print(house_price_dataset)
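If you are on a newer scikit-learn where load_boston() no longer exists, here is a minimal alternative sketch that fetches the same data from OpenML and builds the DataFrame directly. It assumes internet access, the OpenML dataset named "boston", and that its target column is called MEDV; if your column names or dtypes differ, adjust accordingly. With this variant you can skip the DataFrame-construction cells below.

# alternative loading for scikit-learn >= 1.2 (assumption: OpenML copy of the Boston dataset)
from sklearn.datasets import fetch_openml

boston = fetch_openml(name='boston', version=1, as_frame=True)
house_price_dataframe = boston.frame.rename(columns={'MEDV': 'price'})  # MEDV is the target column
# note: some columns may come back as categorical; cast them with .astype(float) if needed
house_price_dataframe.head()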

# Loading the dataset to a Pandas DataFrame
house_price_dataframe = pd.DataFrame(house_price_dataset.data, columns = house_price_dataset.feature_names)

# Print First 5 rows of our DataFrame
house_price_dataframe.head()

# add the target (price) column to the DataFrame
house_price_dataframe['price'] = house_price_dataset.target

house_price_dataframe.head()

# checking the number of rows and Columns in the data frame
house_price_dataframe.shape

# check for missing values
house_price_dataframe.isnull().sum()

# statistical measures of the dataset
house_price_dataframe.describe()

"""Understanding the correlation between various features in the dataset

1. Positive Correlation

2. Negative Correlation
"""

correlation = house_price_dataframe.corr()

# constructing a heatmap to understand the correlation
plt.figure(figsize=(10,10))
sns.heatmap(correlation, cbar=True, square=True, fmt='.1f', annot=True, annot_kws={'size':8}, cmap='Blues')

"""Splitting the data and Target"""

X = house_price_dataframe.drop(['price'], axis=1)
Y = house_price_dataframe['price']

print(X)
print(Y)

"""Splitting the data into Training data and Test data"""

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 2)

print(X.shape, X_train.shape, X_test.shape)

"""Model Training

XGBoost Regressor
"""

# loading the model
model = XGBRegressor()

# training the model with X_train
model.fit(X_train, Y_train)

"""Evaluation

Prediction on training data
"""

# prediction on training data
training_data_prediction = model.predict(X_train)

print(training_data_prediction)

# R squared value
score_1 = metrics.r2_score(Y_train, training_data_prediction)

# Mean Absolute Error
score_2 = metrics.mean_absolute_error(Y_train, training_data_prediction)

print("R squared value : ", score_1)
print('Mean Absolute Error : ', score_2)

"""Visualizing the actual Prices and predicted prices"""

plt.scatter(Y_train, training_data_prediction)
plt.xlabel("Actual Prices")
plt.ylabel("Predicted Prices")
plt.title("Actual Price vs Preicted Price")
plt.show()

"""Prediction on Test Data """

# prediction on test data
test_data_prediction = model.predict(X_test)

# R squared value
score_1 = metrics.r2_score(Y_test, test_data_prediction)

# Mean Absolute Error
score_2 = metrics.mean_absolute_error(Y_test, test_data_prediction)

print("R squared value : ", score_1)
print('Mean Absolute Error : ', score_2)
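To see how the trained model is used for an individual house, here is a short sketch that reuses the X_test/Y_test split from above and predicts the price of one held-out row:

# predict the price of a single house from the test split
sample = X_test.iloc[[0]]                  # keep it 2-D: one row, all features
predicted_price = model.predict(sample)[0]
print('Predicted price :', predicted_price)
print('Actual price    :', Y_test.iloc[0])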

Car Price Prediction

 

"""

Importing the Dependencies
"""

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn import metrics

"""Data Collection and Processing"""

# loading the data from csv file to pandas dataframe
car_dataset = pd.read_csv('/content/car data.csv')

# inspecting the first 5 rows of the dataframe
car_dataset.head()

# checking the number of rows and columns
car_dataset.shape

# getting some information about the dataset
car_dataset.info()

# checking the number of missing values
car_dataset.isnull().sum()

# checking the distribution of categorical data
print(car_dataset.Fuel_Type.value_counts())
print(car_dataset.Seller_Type.value_counts())
print(car_dataset.Transmission.value_counts())

"""Encoding the Categorical Data"""

# encoding "Fuel_Type" Column
car_dataset.replace({'Fuel_Type':{'Petrol':0,'Diesel':1,'CNG':2}},inplace=True)

# encoding "Seller_Type" Column
car_dataset.replace({'Seller_Type':{'Dealer':0,'Individual':1}},inplace=True)

# encoding "Transmission" Column
car_dataset.replace({'Transmission':{'Manual':0,'Automatic':1}},inplace=True)

car_dataset.head()

"""Splitting the data and Target"""

X = car_dataset.drop(['Car_Name','Selling_Price'],axis=1)
Y = car_dataset['Selling_Price']

print(X)

print(Y)

"""Splitting Training and Test data"""

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.1, random_state=2)

"""Model Training

1. Linear Regression
"""

# loading the linear regression model
lin_reg_model = LinearRegression()

lin_reg_model.fit(X_train,Y_train)

"""Model Evaluation"""

# prediction on Training data
training_data_prediction = lin_reg_model.predict(X_train)

# R squared Error
error_score = metrics.r2_score(Y_train, training_data_prediction)
print("R squared Error : ", error_score)

"""Visualize the actual prices and Predicted prices"""

plt.scatter(Y_train, training_data_prediction)
plt.xlabel("Actual Price")
plt.ylabel("Predicted Price")
plt.title(" Actual Prices vs Predicted Prices")
plt.show()

# prediction on Test data
test_data_prediction = lin_reg_model.predict(X_test)

# R squared Error
error_score = metrics.r2_score(Y_test, test_data_prediction)
print("R squared Error : ", error_score)

plt.scatter(Y_test, test_data_prediction)
plt.xlabel("Actual Price")
plt.ylabel("Predicted Price")
plt.title(" Actual Prices vs Predicted Prices")
plt.show()

"""2. Lasso Regression"""

# loading the lasso regression model
lass_reg_model = Lasso()

lass_reg_model.fit(X_train,Y_train)

"""Model Evaluation"""

# prediction on Training data
training_data_prediction = lass_reg_model.predict(X_train)

# R squared Error
error_score = metrics.r2_score(Y_train, training_data_prediction)
print("R squared Error : ", error_score)

"""Visualize the actual prices and Predicted prices"""

plt.scatter(Y_train, training_data_prediction)
plt.xlabel("Actual Price")
plt.ylabel("Predicted Price")
plt.title(" Actual Prices vs Predicted Prices")
plt.show()

# prediction on Test data
test_data_prediction = lass_reg_model.predict(X_test)

# R squared Error
error_score = metrics.r2_score(Y_test, test_data_prediction)
print("R squared Error : ", error_score)

plt.scatter(Y_test, test_data_prediction)
plt.xlabel("Actual Price")
plt.ylabel("Predicted Price")
plt.title(" Actual Prices vs Predicted Prices")
plt.show()
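To make the comparison between the two models concrete, here is a short sketch that reuses the X_test/Y_test split and both fitted models from above to predict the selling price of one held-out car:

# compare both models on a single held-out car
sample = X_test.iloc[[0]]                        # one row, all encoded features
print('Actual selling price :', Y_test.iloc[0])
print('Linear Regression    :', lin_reg_model.predict(sample)[0])
print('Lasso Regression     :', lass_reg_model.predict(sample)[0])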

Diabetes Prediction Using Python

 

"""Importing the Dependencies
"""

import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.metrics import accuracy_score

"""Data Collection and Analysis

PIMA Diabetes Dataset
"""

# loading the diabetes dataset to a pandas DataFrame
diabetes_dataset = pd.read_csv('.../diabetes.csv')

# pd.read_csv?   # IPython help syntax; works only in a notebook, remove it when running as a plain script

# printing the first 5 rows of the dataset
diabetes_dataset.head()

# number of rows and Columns in this dataset
diabetes_dataset.shape

# getting the statistical measures of the data
diabetes_dataset.describe()

diabetes_dataset['Outcome'].value_counts()

"""0 --> Non-Diabetic

1 --> Diabetic
"""

diabetes_dataset.groupby('Outcome').mean()

# separating the data and labels
X = diabetes_dataset.drop(columns = 'Outcome', axis=1)
Y = diabetes_dataset['Outcome']

print(X)

print(Y)

"""Data Standardization"""

scaler = StandardScaler()

scaler.fit(X)

standardized_data = scaler.transform(X)

print(standardized_data)

X = standardized_data
Y = diabetes_dataset['Outcome']

print(X)
print(Y)

"""Train Test Split"""

X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.2, stratify=Y, random_state=2)

print(X.shape, X_train.shape, X_test.shape)

"""Training the Model"""

classifier = svm.SVC(kernel='linear')

#training the support vector Machine Classifier
classifier.fit(X_train, Y_train)

"""Model Evaluation

Accuracy Score
"""

# accuracy score on the training data
X_train_prediction = classifier.predict(X_train)
training_data_accuracy = accuracy_score(Y_train, X_train_prediction)  # (y_true, y_pred)

print('Accuracy score of the training data : ', training_data_accuracy)

# accuracy score on the test data
X_test_prediction = classifier.predict(X_test)
test_data_accuracy = accuracy_score(Y_test, X_test_prediction)  # (y_true, y_pred)

print('Accuracy score of the test data : ', test_data_accuracy)

"""Making a Predictive System"""

input_data = (5,166,72,19,175,25.8,0.587,51)

# changing the input_data to numpy array
input_data_as_numpy_array = np.asarray(input_data)

# reshape the array as we are predicting for one instance
input_data_reshaped = input_data_as_numpy_array.reshape(1,-1)

# standardize the input data
std_data = scaler.transform(input_data_reshaped)
print(std_data)

prediction = classifier.predict(std_data)
print(prediction)

if (prediction[0] == 0):
  print('The person is not diabetic')
else:
  print('The person is diabetic')
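The same steps can be wrapped into a small helper, so that any new set of measurements can be checked in one call. This is just a sketch that reuses the fitted scaler and classifier from above and assumes the input is given in the same order as the 8 feature columns:

# hypothetical helper around the fitted scaler and classifier
def predict_diabetes(measurements):
    arr = np.asarray(measurements).reshape(1, -1)    # one sample, all 8 features
    std = scaler.transform(arr)                      # standardize with the already-fitted scaler
    return 'diabetic' if classifier.predict(std)[0] == 1 else 'not diabetic'

print(predict_diabetes((5, 166, 72, 19, 175, 25.8, 0.587, 51)))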

Sunday 4 October 2020

Extract audio from video file

 Video to Audio Converter:




import moviepy.editor   # requires moviepy 1.x (the editor module was removed in moviepy 2.0)
import tkinter.filedialog
print("Press Y to choose the video file:")
if input().casefold()=='y':
    video_path=tkinter.filedialog.askopenfilename()
    video=moviepy.editor.VideoFileClip(video_path)
    audio=video.audio
    audio.write_audiofile('Audio1.mp3')
else:
    print("You need to choose a file to move ahead") 
 

  Read more about moviepy: https://pypi.org/project/moviepy/

Read about tkinter: https://docs.python.org/3/library/tkinter.html

Tuesday 15 September 2020

Writing text to an Image

 


In this project we are going to learn how to add text to an image.

This can be used

  • to create an image quote in Python
  • to create a watermark in Python

We will be using the PIL (Pillow) library in this project.

To open the image and draw on it we will use the Image and ImageDraw modules.

To set fonts we will use the ImageFont module,

and

to write text on an image we will use the multiline_text() method of the ImageDraw module.

To create an image quote we need a background image of our own choice on which we can write our quote.

Let's define the tasks to be performed for this project and work accordingly.

Task 0: Import libraries:

from PIL import Image, ImageDraw, ImageFont

Task 1: Read an Image

imgObject=Image.open('full or relative path of your image')

Image.open() reads the image and returns an Image object


Task 2: Set the font of the text you want to add to the image

font_object = ImageFont.truetype("E:\\Font\\sunday-spring\\Sunday Spring.ttf", 350)

Download any font file of your own choice and specify its path inside the truetype() method of the ImageFont module.

truetype() loads OpenType (.otf) fonts as well as TrueType (.ttf) fonts, so the same call works if your font file has the .otf extension:

font_object = ImageFont.truetype("E:\\Font\\sunday-spring\\Sunday Spring.otf", 350)

Task 3 : Creating ImageDraw object to draw text on an Image

drawing_object = ImageDraw.Draw(imgObject)

Inside ImageDraw.Draw() we pass the image object, so that the drawing context is bound to that image.

Task 4: Write the text 

drawing_object.multiline_text((400,800), "MV's Code Guide", font=font_object, fill=(0, 0, 0))

On the drawing object, call the multiline_text() method to add multi-line text, or the text() method to add a single line of text.

Inside multiline_text() method :

  • First parameter: the xy coordinates of the top-left corner of the text.
  • Second parameter: the text to be drawn.
  • Third parameter: font – an ImageFont instance.
  • Fourth parameter: fill – the colour to use for the text.
You can also specify other parameters (see the short sketch below) like
  • spacing – the number of pixels between lines.
  • align – "left", "center" or "right".
  • direction – direction of the text: "rtl" (right to left), "ltr" (left to right) or "ttb" (top to bottom). Requires libraqm.
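For example, a minimal sketch (reusing the drawing_object and font_object created above) that draws two centred lines with extra spacing between them:

# two-line caption with custom line spacing and alignment
drawing_object.multiline_text(
    (400, 800),
    "MV's Code Guide\nLearn Python by doing",   # the second line is just an example
    font=font_object,
    fill=(0, 0, 0),
    spacing=40,          # 40 pixels between the two lines
    align="center"       # centre the shorter line relative to the longer one
)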

 Task 5: Save the new Image with text:

imgObject.save('new_image.jpg')

Note that save() is called on the Image object (imgObject), not on the ImageDraw object; the drawing is applied to the image in place.

Full code:

#Example: Create Image Quote

from PIL import Image, ImageDraw, ImageFont

# create an image object
#imgObject = Image.new("RGB", (500, 500), (255, 255, 255))
 
imgObject=Image.open('Path of Image')
 
 
# get a font object
font_object = ImageFont.truetype("E:\\Font\\sunday-spring\\Sunday Spring.ttf", 350)
 
 
# get a drawing context
drawing_object = ImageDraw.Draw(imgObject)

 
# draw multiline text
drawing_object.multiline_text((400,800), "MV's Code Guide", font=font_object, fill=(0, 0, 0))
 
 
# save the image (call save() on the Image object, not on the ImageDraw object)
imgObject.save('new_image.jpg')

 

Sunday 13 September 2020

Extract Text From Image Using Python

 

 

We are going to use pytesseract and pillow library to work on this project!

Before you start coding, you need to complete three tasks:

1. Click on the link below and install tesseract-OCR

https://github.com/UB-Mannheim/tesseract/wiki

After you install the setup successfully, take a note of where you are saving the file because we need that path in our code.

2. Install pytesseract by using command : pip install pytesseract

3. Install pillow by using command : pip install pillow 

 

Source Code:

 
import pytesseract
from PIL import Image
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
value=Image.open('logo.png')
text=pytesseract.image_to_string(value)
print("Extracted Data is: \n ", text) 
 

Notice that if you want to use a full Windows path, you need to use double backslashes (\\) instead of single backslashes (\) in your path.

For Example: If your path looks like this: D:\Python_Program\my_Image.jpg

Then replace \ with \\

something like : D:\\Python_Program\\my_Image.jpg

Or

prefix your string with the letter r to make it a raw string,
for example:     r'C:\Program Files\Tesseract-OCR\tesseract.exe'
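Putting the two equivalent forms side by side (a tiny sketch with a hypothetical path, just to show they produce the same string):

# the same Windows path written two equivalent ways
path_escaped = 'D:\\Python_Program\\my_Image.jpg'   # double backslashes
path_raw     = r'D:\Python_Program\my_Image.jpg'    # raw string
print(path_escaped == path_raw)                     # True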

and then you are good to go!

 

Thursday 10 September 2020

Spell Checker with Python

 We can use two different packages to create spell checker

1. Using textblob

2. Using pyspellchecker


 Source Code:

 Using textblob:

 
from textblob import TextBlob
misspelled_word= "Incorrct"
corrected_word= TextBlob(misspelled_word).correct()
print("Misspelled Word:",misspelled_word)
print("Corrected Word: ",corrected_word)

# you can also use list of misspelled words
print("========================================================")
misspelled_word_list=['Incorrct','Mang','Summay','Watr','Appl']
for word in misspelled_word_list:
    correct_word=TextBlob(word).correct()
    print("Misspelled Word:",misspelled_word_list)
    print("Corrected Word: ",correct_word) 
 

Using pyspellchecker:

 
from spellchecker import SpellChecker
spell=SpellChecker()
misspelled_word=spell.unknown(['Incorrct', 'Summry','Mondy','Spellng','Pyhon'])
print(type(misspelled_word))
for word in misspelled_word:
    print("Corrected word is ", spell.correction(word))
    print("Candidate words:",spell.candidates(word)) 
 
 


You can play around with this code and change it according to your needs!

You can also read a text file and use this code to check the spelling of the words in the file


from textblob import TextBlob
from spellchecker import SpellChecker

f=open('demo.txt','r')
data=str(f.read())
print(data)
# you can also use list of misspelled words
print("\n\n**************************Using textblob**************************************")
misspelled_word_list= data.split()
for word in misspelled_word_list:
    correct_word=TextBlob(word).correct()
    print("\nWord: ",word)
    print('------------------------------')
    print("Corrected Word: ",correct_word)
    print("\n================================")

print('\n***************************Using pyspellchecker***********************************')
spell=SpellChecker()
misspelled_word=spell.unknown(misspelled_word_list)
print(type(misspelled_word))
for word in misspelled_word:
    print("\nword: ",word)
    print('--------------------------------------------')
    print("Corrected word is ", spell.correction(word))
    print('--------------------------------------------')
    print("Candidate words:",spell.candidates(word))
    print("\n================================")

f.close()

  


output:

Artifcial intelligence (AI), someties called macine intelligence, is intellience demonstated by machines, unlike the
naural intelligence dislayed by humans and animls.



**************************Using textblob**************************************

Word:  Artifcial
------------------------------
Corrected Word:  Artificial

================================

Word:  intelligence
------------------------------
Corrected Word:  intelligence

================================

Word:  (AI),
------------------------------
Corrected Word:  (of),

================================

Word:  someties
------------------------------
Corrected Word:  sometimes

================================

Word:  called
------------------------------
Corrected Word:  called

================================

Word:  macine
------------------------------
Corrected Word:  machine

================================

Word:  intelligence,
------------------------------
Corrected Word:  intelligence,

================================

Word:  is
------------------------------
Corrected Word:  is

================================

Word:  intellience
------------------------------
Corrected Word:  intelligence

================================

Word:  demonstated
------------------------------
Corrected Word:  demonstrated

================================

Word:  by
------------------------------
Corrected Word:  by

================================

Word:  machines,
------------------------------
Corrected Word:  machines,

================================

Word:  unlike
------------------------------
Corrected Word:  unlike

================================

Word:  the
------------------------------
Corrected Word:  the

================================

Word:  naural
------------------------------
Corrected Word:  natural

================================

Word:  intelligence
------------------------------
Corrected Word:  intelligence

================================

Word:  dislayed
------------------------------
Corrected Word:  displayed

================================

Word:  by
------------------------------
Corrected Word:  by

================================

Word:  humans
------------------------------
Corrected Word:  humans

================================

Word:  and
------------------------------
Corrected Word:  and

================================

Word:  animls.
------------------------------
Corrected Word:  animals.

================================

***************************Using pyspellchecker***********************************
<class 'set'>

word:  (ai),
--------------------------------------------
Corrected word is  (ai),
--------------------------------------------
Candidate words: {'(ai),'}

================================

word:  someties
--------------------------------------------
Corrected word is  sometimes
--------------------------------------------
Candidate words: {'sometimes'}

================================

word:  machines,
--------------------------------------------
Corrected word is  machines
--------------------------------------------
Candidate words: {'machines'}

================================

word:  intelligence,
--------------------------------------------
Corrected word is  intelligence
--------------------------------------------
Candidate words: {'intelligences', 'intelligence'}

================================

word:  demonstated
--------------------------------------------
Corrected word is  demonstrated
--------------------------------------------
Candidate words: {'demonstrated'}

================================

word:  naural
--------------------------------------------
Corrected word is  natural
--------------------------------------------
Candidate words: {'natural', 'aural', 'neural'}

================================

word:  dislayed
--------------------------------------------
Corrected word is  displayed
--------------------------------------------
Candidate words: {'displayed', 'dismayed'}

================================

word:  animls.
--------------------------------------------
Corrected word is  animals
--------------------------------------------
Candidate words: {'animist', 'animus', 'animals', 'animism'}

================================

word:  macine
--------------------------------------------
Corrected word is  machine
--------------------------------------------
Candidate words: {'machine', 'maine', 'racine', 'maxine', 'marine'}

================================

word:  intellience
--------------------------------------------
Corrected word is  intelligence
--------------------------------------------
Candidate words: {'intelligence'}

================================

word:  artifcial
--------------------------------------------
Corrected word is  artificial
--------------------------------------------
Candidate words: {'artifical', 'artificial'}

================================

 


Saturday 22 August 2020

Counting vowels in a string

 Using count() method:

string_data=input("Enter a string:")
vowels={'a','e','i','o','u'}
# Using Count() Method
list1=[]
for ch in string_data:
	if ch in vowels:
		if ch not in list1:
			list1.append(ch)
			print("{} occurred {} times".format(ch,string_data.count(ch)))

Without using count() method:

string_data=input("Enter a string:")
vowels={'a','e','i','o','u'}
d={}
for ch in string_data:
	if ch in vowels:
		d[ch]=d.get(ch,0)+1

print(d)
for key,value in sorted(d.items()):
	print(key,"occurred",value,"times")

Friday 21 August 2020

Counting number of occurrences of each character in a string


Using count() method:

string_data=input("Enter string: ")
l=[]
for x in string_data:
	if x not in l:
		l.append(x)
		print(x,"occurred",string_data.count(x),"times")

Without using count() method:

string_data=input("Enter string: ")
d={}
for x in string_data:
	d[x]=d.get(x,0) + 1
print(d)
print(d.items())
# for meaningful output
for key,value in sorted(d.items()):
	print(key,"occurred",value,"times")  

 

Question by Subscriber: 

 How to ignore special characters like ? , . etc

If you want to ignore special characters and focus only on letters and digits, you can do that with a little extra work.

You can either import the string module to build the set of characters you want to keep, or you can write out that set of characters manually.

We will name this set valid_characters.
Here goes the code:

import string
string_data=input("Enter string: ")
l=[]
valid_characters=set(string.ascii_letters+string.digits)
for x in string_data:
	if x in valid_characters:
		if x not in l:
			l.append(x)
			print(x,"occurred",string_data.count(x),"times")

or

string_data=input("Enter string: ")
l=[]
valid_characters=set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"+'0123456789')
for x in string_data:
	if x in valid_characters:
		if x not in l:
			l.append(x)
			print(x,"occurred",string_data.count(x),"times")



Hope you like it!