Testing the speed and memory usage of various approaches to storing information in Python.
import sys
import time
import sqlite3
import string
import random
import tables
import pandas as pd
from collections import deque
NUMBER_OF_RECORDS_TO_POPULATE = 1000000 # one million
def get_size(obj, seen=None):
    """Recursively finds size of objects"""
    size = sys.getsizeof(obj)
    if seen is None:
        seen = set()
    obj_id = id(obj)
    if obj_id in seen:
        return 0
    # Important: mark as seen *before* entering recursion to gracefully handle
    # self-referential objects
    seen.add(obj_id)
    if isinstance(obj, dict):
        size += sum([get_size(v, seen) for v in obj.values()])
        size += sum([get_size(k, seen) for k in obj.keys()])
    elif hasattr(obj, '__dict__'):
        size += get_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        size += sum([get_size(i, seen) for i in obj])
    return size
def random_char(y):
    """Generate a random string of a given size"""
    return ''.join(random.choice(string.ascii_letters) for x in range(y))
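get_size() only counts what sys.getsizeof exposes to Python. As a cross-check (an addition here, not part of the original runs), the standard-library tracemalloc module reports what the interpreter actually allocated while a small sample is built:
import tracemalloc
# sanity check for get_size(): compare against tracemalloc's measurement of real allocations
tracemalloc.start()
sample = [(random_char(20), 20, 40, random_char(10)) for _ in range(100000)]
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
print("tracemalloc:", current/1e6, "MB", "get_size:", get_size(sample)/1e6, "MB")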
List of lists
def populate_listoflist(n):
    llist = []
    st = time.time()
    for i in range(n):
        target_seq = random_char(20)
        start = 20
        stop = 40
        seqid = random_char(10)
        item = [target_seq, start, stop, seqid]
        llist.append(item)
        if (i % (n // 5)) == 0:
            print(i, get_size(llist)/1e9, "GB", "Each entry size", get_size(item))
    et = time.time()
    print(f'Total Run Time:{(et-st):.8f}', "Total Size: ", get_size(llist)/1e9, "GB")
    return llist
lol = populate_listoflist(NUMBER_OF_RECORDS_TO_POPULATE) # slows down with more records
0 3.92e-07 GB Each entry size 288
200000 0.048072088 GB Each entry size 288
400000 0.096189856 GB Each entry size 288
600000 0.144026608 GB Each entry size 288
800000 0.192472288 GB Each entry size 288
Total Run Time:49.47805595 Total Size: 0.240697528 GB
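An aside (not part of the measured runs): a good share of each iteration is spent in random_char; random.choices with k=... builds the same strings with fewer Python-level calls and is usually a bit faster. A sketch of that drop-in alternative:
def random_char_fast(y):
    # hypothetical alternative to random_char(), using random.choices (Python 3.6+)
    return ''.join(random.choices(string.ascii_letters, k=y))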
List of tuples (labelled "set" in the code below)
def populate_listofset(n):
    llist = []
    st = time.time()
    for i in range(n):
        target_seq = random_char(20)
        start = 20
        stop = 40
        seqid = random_char(10)
        item = (target_seq, start, stop, seqid)
        llist.append(item)
        if (i % (n // 5)) == 0:
            print(i, get_size(llist)/1e9, "GB", "Each entry size", get_size(item))
    et = time.time()
    print(f'Total Run Time:{(et-st):.8f}', "Total Size: ", get_size(llist)/1e9, "GB")
    return llist
los = populate_listofset(NUMBER_OF_RECORDS_TO_POPULATE)
0 3.76e-07 GB Each entry size 272
200000 0.044872072 GB Each entry size 272
400000 0.08978984 GB Each entry size 272
600000 0.134426592 GB Each entry size 272
800000 0.179672272 GB Each entry size 272
Total Run Time:54.10458398 Total Size: 0.224697528 GB
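The per-entry difference (288 vs 272 bytes from get_size) comes from the container itself: a 4-element tuple is smaller than a 4-element list, while the strings and ints inside are identical. A quick check, added here for illustration:
item_list = [random_char(20), 20, 40, random_char(10)]
item_tuple = tuple(item_list)
# container sizes only; sys.getsizeof does not include the referenced strings/ints
print(sys.getsizeof(item_list), sys.getsizeof(item_tuple))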
List of tuples to DataFrame
df = pd.DataFrame(los, columns = ['target_seq', 'start','stop', 'seqid'])
print("Size of dataframe:", get_size(df)/1e9, "GB")
Size of dataframe: 0.348007007 GB
NOTE: When the list of tuples is converted to a DataFrame, the size increases (here from 0.2247 GB to 0.348 GB).
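pandas keeps its own accounting; DataFrame.memory_usage(deep=True) (an alternative check, not in the original run) includes the Python string objects held in the object columns:
print(df.memory_usage(deep=True))                  # bytes per column
print(df.memory_usage(deep=True).sum()/1e9, "GB")  # total, comparable to get_size(df)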
Dict of lists
def populate_dictoflist(n):
    ldict = dict(target_seq=[], start=[], stop=[], seqid=[])
    st = time.time()
    for i in range(n):
        ldict["target_seq"].append(random_char(20))
        ldict["start"].append(20)
        ldict["stop"].append(40)
        ldict["seqid"].append(random_char(10))
        if (i % (n // 5)) == 0:
            print(i, get_size(ldict)/1e9, "GB")
    et = time.time()
    print(f'Total Run Time:{(et-st):.8f}', "Total Size: ", get_size(ldict)/1e9, "GB")
    return ldict
# dol = populate_dictoflist(NUMBER_OF_RECORDS_TO_POPULATE)
The dict of lists turned out to be the worst for both speed and memory usage.
So far, the list of tuples gives the best memory utilization, with speed comparable to the list of lists.
List of tuples with deque
def populate_listofset_withdeque(n):
    llist = deque([])
    st = time.time()
    for i in range(n):
        target_seq = random_char(20)
        start = 20
        stop = 40
        seqid = random_char(10)
        item = (target_seq, start, stop, seqid)
        llist.append(item)
        if (i % (n // 5)) == 0:
            print(i, get_size(llist)/1e9, "GB", "Each entry size", get_size(item))
    et = time.time()
    print(f'Total Run Time:{(et-st):.8f}', "Total Size: ", get_size(llist)/1e9, "GB")
    return llist
los_withdeque = populate_listofset_withdeque(NUMBER_OF_RECORDS_TO_POPULATE)
0 9.12e-07 GB Each entry size 272
200000 0.044850912 GB Each entry size 272
400000 0.089700912 GB Each entry size 272
600000 0.134550912 GB Each entry size 272
800000 0.179400912 GB Each entry size 272
Total Run Time:50.93443680 Total Size: 0.224250696 GB
Comparing los and los_withdeque: memory usage is essentially identical, and the run times are close (54.1 s for the plain list vs 50.9 s for the deque in the runs above).
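For append-only workloads at the right end, a plain list and a deque are both amortized O(1), which is why they come out so close here; a deque mainly pays off when items are added or removed at both ends. A tiny aside, not part of the benchmark:
d = deque()
d.append(1)
d.appendleft(0)        # O(1); the list equivalent, list.insert(0, x), is O(n)
print(d)               # deque([0, 1])
print(d.popleft())     # 0, also O(1)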
Use of PyTables
- This will create/write a file on the local drive in HDF5 format.
# h5f.close()  # close any previously opened handle before re-opening
h5f = tables.open_file('targets.h5', 'w')
class Target(tables.IsDescription):
    targetSeq = tables.StringCol(16)     # NOTE: the 20-char strings written below are truncated to 16 bytes
    targetStart = tables.Int64Col()
    targetStop = tables.Int64Col()
    targetSeqID = tables.StringCol(16)   # same truncation applies to the seq IDs
def populate_pytable(n):
    tbl = h5f.create_table('/', 'targetseq', Target, title="Target Sequences")
    row = tbl.row
    st = time.time()
    for i in range(n):
        row['targetSeq'] = random_char(20)
        row['targetStart'] = 20
        row['targetStop'] = 40
        row['targetSeqID'] = random_char(20)
        row.append()
        if (i % (n // 5)) == 0:
            # NOTE: llist and item are not defined in this function; in the original session they
            # were leftover globals from the list-of-list experiment, and get_size(llist) walks
            # that ~1M-entry list on every print, which inflates the runtime below
            print(i, get_size(llist)/1e9, "GB", "Each entry size", get_size(item))
    et = time.time()
    print(f'Total Run Time:{(et-st):.8f}', "Total Size: ", get_size(tbl)/1e9, "GB")
    return tbl
#py_table = populate_pytable(NUMBER_OF_RECORDS_TO_POPULATE)
py_table = populate_pytable(10)
0 0.243017528 GB Each entry size 288
2 0.243017528 GB Each entry size 288
4 0.243017528 GB Each entry size 288
6 0.243017528 GB Each entry size 288
8 0.243017528 GB Each entry size 288
Total Run Time:37.86861491 Total Size: 3.8417e-05 GB
NOTE: Even for 10 rows the recorded runtime is ~38 s, but most of that is likely the stray get_size(llist) calls inside the loop (each walks the million-entry list left over from earlier cells), not PyTables itself.
py_table.flush() # flush buffered rows to the file (also frees the internal buffers)
i = 0
for r in py_table:
    if i < 10:
        targetSeq = r['targetSeq']
        targetStart = r['targetStart']
        targetStop = r['targetStop']
        targetSeqID = r['targetSeqID']
        print(i, r, targetSeq, targetStart, targetStop, targetSeqID)
    i += 1
0 /targetseq.row (Row), pointing to row #0 b'cSVwnmKfVzEuLNsc' 20 40 b'XnHfVkswKdOIqcxs'
1 /targetseq.row (Row), pointing to row #1 b'ZjPvrZPoAypkGlav' 20 40 b'XXzQmKMihkkMgxFG'
2 /targetseq.row (Row), pointing to row #2 b'mwuKrcylRmQrjOUY' 20 40 b'xcfCqBYcosVvLtSX'
3 /targetseq.row (Row), pointing to row #3 b'hrxKubcrSPElykBA' 20 40 b'udXepAwlcZhdMUqM'
4 /targetseq.row (Row), pointing to row #4 b'yugOzNkXftiJFfCN' 20 40 b'mrkexqbkWScjGrCF'
5 /targetseq.row (Row), pointing to row #5 b'VqWOFThzHufUZkBg' 20 40 b'GqHhsxSEdhQCZtNE'
6 /targetseq.row (Row), pointing to row #6 b'vGGvewarvFIICHDb' 20 40 b'ebvBWdUlSLEMlVva'
7 /targetseq.row (Row), pointing to row #7 b'jeCyFaKlEosIFVWY' 20 40 b'IzCSbCvrNEXIdaxL'
8 /targetseq.row (Row), pointing to row #8 b'nsoCAjtJQxylstMn' 20 40 b'nbNnvoExSaSxdbrW'
9 /targetseq.row (Row), pointing to row #9 b'LYwmcuFhptkNimaF' 20 40 b'gFCFpaaGknTVLgyX'
h5f.close() # to close file
Adding records does not show up in get_size (the data is written out to the HDF5 file), but populating row by row was slow in these tests.
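The Row.append() pattern above writes one record at a time from Python; the usual remedy is to append whole batches with Table.append(). A sketch of that idea (my addition, not benchmarked here; it assumes a new table name 'targetseq_batched' and uses pos= so plain tuples line up with the on-disk column order):
h5f = tables.open_file('targets.h5', 'a')   # reopen the file in append mode
class TargetOrdered(tables.IsDescription):
    # pos= fixes the column order so the tuples below match it
    targetSeq = tables.StringCol(16, pos=0)
    targetStart = tables.Int64Col(pos=1)
    targetStop = tables.Int64Col(pos=2)
    targetSeqID = tables.StringCol(16, pos=3)
def populate_pytable_batched(n, batch_size=100000):
    tbl = h5f.create_table('/', 'targetseq_batched', TargetOrdered, title="Target Sequences (batched)")
    st = time.time()
    batch = []
    for i in range(n):
        batch.append((random_char(20), 20, 40, random_char(10)))
        if len(batch) == batch_size:
            tbl.append(batch)     # one HDF5 write per batch instead of one Row.append() per record
            batch = []
    if batch:
        tbl.append(batch)
    tbl.flush()
    print(f'Total Run Time:{(time.time()-st):.8f}')
    return tbl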
sqlite3: with the option to create a db in memory
# create a db in memory
db = sqlite3.connect(":memory:")  # note: "file::memory:" without uri=True would be treated as an ordinary on-disk filename
cur = db.cursor()
# Create table
cur.execute('''CREATE TABLE target
(targetseq text, start int, stop int, seqid text)''')
<sqlite3.Cursor at 0x7f80d6f1d030>
def sql_populate(n):
    st = time.time()
    for i in range(n):
        item = [random_char(20), 20, 40, random_char(10)]
        cur.execute('insert into target values (?,?,?,?)', item)
        if (i % (n // 5)) == 0:
            print(i, get_size(db)/1e9, "GB", "Each entry size", get_size(item))
    et = time.time()
    print(f'Total Run Time:{(et-st):.8f}', "Total Size: ", get_size(db)/1e9, "GB")
    return db
sql_populate(NUMBER_OF_RECORDS_TO_POPULATE)
0 2.24e-07 GB Each entry size 288
200000 2.24e-07 GB Each entry size 288
400000 2.24e-07 GB Each entry size 288
600000 2.24e-07 GB Each entry size 288
800000 2.24e-07 GB Each entry size 288
Total Run Time:33.86733985 Total Size: 2.24e-07 GB
<sqlite3.Connection at 0x7f80d6ea7f10>
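Calling cur.execute() once per row has per-call overhead; cur.executemany() over batches is the standard way to speed up bulk inserts. A sketch (my addition, not part of the timed run above):
def sql_populate_batched(n, batch_size=100000):
    st = time.time()
    batch = []
    for i in range(n):
        batch.append((random_char(20), 20, 40, random_char(10)))
        if len(batch) == batch_size:
            cur.executemany('insert into target values (?,?,?,?)', batch)
            batch = []
    if batch:
        cur.executemany('insert into target values (?,?,?,?)', batch)
    db.commit()
    print(f'Total Run Time:{(time.time()-st):.8f}')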
# retrieving information from the SQL db row-wise, without fetching the whole db into memory
cur.row_factory = sqlite3.Row
cursor = cur.execute("SELECT targetseq FROM target")
# for row in cursor:
# print(row[0])
tseq = []
for row in cursor:
    tseq.append(row[0])
tseq[1:10]
['VyBbbIrDSOwAedpfxMCU',
'MeLyhrDXjXDPpKeHeIEO',
'sXgZFGfLlHpdUZnELwox',
'CdylNHKinGGTQDBKGooK',
'QWQSUYINbiMPRpxblCgE',
'hBTAuYBBGeoCIjiwsmwj',
'BPHjOXRQKMmIkifBYeOA',
'sAGYNTIdiwwVhABvaCMs',
'eZHxzWwgcaiqxzhAwksm']
get_size(tseq)/ 1e9
0.077697472
Fetching the whole db as a list with fetchall() is possible, but it takes more time and memory.
cur.execute('SELECT * FROM target')
data = cur.fetchall()
print(get_size(data)/1e9,"GB")
0.168697528 GB
i = 0
for row in data:
    eachrow = [row['targetseq'], row['start'], row['stop'], row['seqid']]
    if i < 10:
        print(eachrow)
    i = i + 1
['slbANVLVbVbiCYHuOPsP', 20, 40, 'sguxJKtcMZ']
['VyBbbIrDSOwAedpfxMCU', 20, 40, 'HZCtrwevpq']
['MeLyhrDXjXDPpKeHeIEO', 20, 40, 'iRnSYrfYth']
['sXgZFGfLlHpdUZnELwox', 20, 40, 'gNmtjGlPQa']
['CdylNHKinGGTQDBKGooK', 20, 40, 'xUGpMUqXdj']
['QWQSUYINbiMPRpxblCgE', 20, 40, 'JtmbpoXOCW']
['hBTAuYBBGeoCIjiwsmwj', 20, 40, 'SJEUzIUOrN']
['BPHjOXRQKMmIkifBYeOA', 20, 40, 'gkMONnWjjI']
['sAGYNTIdiwwVhABvaCMs', 20, 40, 'PYXAcFPqvg']
['eZHxzWwgcaiqxzhAwksm', 20, 40, 'fiycaJWCfb']
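Between streaming single rows and fetchall(), cursor.fetchmany(size) returns fixed-size chunks, which keeps memory bounded while avoiding a per-row loop over the cursor. A small sketch (added here, not run in the original):
cur.execute('SELECT * FROM target')
nrows = 0
while True:
    chunk = cur.fetchmany(100000)      # up to 100000 sqlite3.Row objects at a time
    if not chunk:
        break
    nrows += len(chunk)                # process the chunk here instead of just counting
print(nrows)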
cur.close()
db.commit()
db.close()
Conclusion
Creating a SQL db in memory took roughly 34-46 seconds for a million records across runs, and adding more records does not show up in memory measurements: sys.getsizeof of db stays constant at about 2.24e-07 GB (224 bytes), because it only sees the Python Connection object, not the storage SQLite allocates for the data. Fetching the information back does cost time and memory.
The next best option was the list of tuples, with a total run time of roughly 46-54 seconds across runs and a total size of 0.2247 GB; unlike the db connection, its measured size grows with the number of records.
Now, let's profile the SQL db with ten times more records.
# create a db in memory
db = sqlite3.connect(":memory:")
cur = db.cursor()
# Create table
cur.execute('''CREATE TABLE target
(targetseq text, start int, stop int, seqid text)''')
<sqlite3.Cursor at 0x7f80d6ec6c00>
sql_populate(NUMBER_OF_RECORDS_TO_POPULATE*10)
0 2.24e-07 GB Each entry size 288
2000000 2.24e-07 GB Each entry size 288
4000000 2.24e-07 GB Each entry size 288
6000000 2.24e-07 GB Each entry size 288
8000000 2.24e-07 GB Each entry size 288
Total Run Time:381.34169912 Total Size: 2.24e-07 GB
<sqlite3.Connection at 0x7f8016cf81f0>
cur.row_factory = sqlite3.Row
cursor = cur.execute("SELECT targetseq FROM target")
tseq = []
for row in cursor:
    tseq.append(row[0])
tseq[1:10]
['sfRTJFtqwYoWhlxgxKEi',
'pgEygAqgJnKjxpDEBULq',
'HShUHujPrcIWuoiuVYFq',
'WokTyxEsAmiQGWFKHMzh',
'uzUfJdyiZnhtsNITrFPd',
'kcQypIMxXBVgxfYNwGHa',
'aWDizeMAqCZohttTpfzW',
'GYVWhOcHwnpxfZDGaEBX',
'ejJlnDqtJTmIjhHxGmzd']
cur.execute('SELECT * FROM target')
data = cur.fetchall()
print(get_size(data)/1e9,"GB")
1.68152812 GB
i = 0
for row in data:
    eachrow = [row['targetseq'], row['start'], row['stop'], row['seqid']]
    if i < 10:
        print(eachrow)
    i = i + 1
['mrpPlDMSXpZQrONdGesM', 20, 40, 'ZKaxYvdfPA']
['sfRTJFtqwYoWhlxgxKEi', 20, 40, 'zzzjamdrdL']
['pgEygAqgJnKjxpDEBULq', 20, 40, 'hiQoXVKQPu']
['HShUHujPrcIWuoiuVYFq', 20, 40, 'qqyVJNTnEQ']
['WokTyxEsAmiQGWFKHMzh', 20, 40, 'txTnlAQfud']
['uzUfJdyiZnhtsNITrFPd', 20, 40, 'sJjEJOoaVp']
['kcQypIMxXBVgxfYNwGHa', 20, 40, 'wmvJVLMssY']
['aWDizeMAqCZohttTpfzW', 20, 40, 'kiMkHXIWJx']
['GYVWhOcHwnpxfZDGaEBX', 20, 40, 'atGnxvJHcR']
['ejJlnDqtJTmIjhHxGmzd', 20, 40, 'wkDlqExjTX']
cur.close()
db.commit()
db.close()
## Note: We applied this approach to an actual task: scanning the entire human genome, on both the forward and reverse strands, for 100% matches of a PAM motif for a CRISPR-Cas experiment (Cas9, with an NGG PAM). At each match position we need to record the match start/end positions, 20 bp of sequence upstream/downstream of the PAM location, and the strand. In total there were about 320 million matches, and creating a SQL db with 320 million records took about 5 hours.
FASTEST ONE!!!
Use generators instead of returning lists. Generators are produced by functions that use yield expressions.
def dataiter(n):
    for i in range(n):
        yield (random_char(20), 20, 40, random_char(10))
n = 1000000
t1 = time.time()
a = pd.DataFrame(dataiter(n),
columns = ['target_seq', 'start','stop', 'seqid'])
#a = pd.DataFrame(dataiter(n))
t2 = time.time()
print(t2-t1)
34.73754811286926
get_size(a)
320463967
a.target_seq = a.target_seq.astype('str')
a.start = a.start.astype('int8')
a.stop = a.stop.astype('int8')
a.seqid = a.seqid.astype('str')
get_size(a)
292120743
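Since the in-memory sqlite3 db was the most memory-friendly store and the generator the cheapest producer, the two combine naturally: executemany() consumes the generator lazily, so the records never all exist in Python memory at once. A sketch under those assumptions:
db = sqlite3.connect(":memory:")
cur = db.cursor()
cur.execute('CREATE TABLE target (targetseq text, start int, stop int, seqid text)')
t1 = time.time()
cur.executemany('insert into target values (?,?,?,?)', dataiter(n))   # streams rows from the generator
db.commit()
print(time.time() - t1)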