我有两个正在处理的数据文件。一个包含单词列表,其中包含有关这些单词的一些附加信息,另一个包含单词对(其中单词按其第一个表中的单词ID列出)及其频率。
词典文件(示例输出)

('wID', 'w1', 'w1cs', 'L1', 'c1')
('-----', '-----', '-----', '-----', '-----')
(1, ',', ',', ',', 'y')
(2, '.', '.', '.', 'y')
(3, 'the', 'the', 'the', 'at')
(4, 'and', 'and', 'and', 'cc')
(5, 'of', 'of', 'of', 'io')

Bigram文件(示例输出)
('freq', 'w1', 'w2')
(4, 22097, 161)
(1, 98664, 1320)
(1, 426515, 1345)
(1, 483675, 747)
(19, 63, 15496)
(2, 3011, 7944)
(1, 27985, 27778)

我使用sqlite创建了两个表,并从上面的文件上传了数据。
# Open (or create) the database file and configure the connection.
conn = sqlite3.connect('bigrams.db')
# Return raw byte strings for TEXT columns instead of unicode (Python 2).
conn.text_factory = str
c = conn.cursor()
# Enforce the FOREIGN KEY constraints declared on x2; SQLite ignores
# them unless this pragma is enabled on each connection.
c.execute('pragma foreign_keys=ON')

词汇表
# Lexicon table: one row per word form; wID is the integer key that the
# bigram table (x2) references via its foreign keys.
# IF NOT EXISTS makes the script safe to re-run against an existing db.
c.execute('''CREATE TABLE IF NOT EXISTS lex
            (wID INT PRIMARY KEY, w1 TEXT, w1cs TEXT, L1 TEXT, c1 TEXT)''')

# The old composite index (wID, w1, c1) was removed: wID is already the
# primary key, so that index was redundant for ID lookups.  A
# single-column index on w1 is what the `WHERE lex.w1 = ?` lookups need.
c.execute('''CREATE INDEX IF NOT EXISTS lex_w1_index ON lex (w1)''')

将数据插入词典表
#I replaced this code
# with open('/Users/.../lexicon.txt', "rb") as lex_file:
#    for line in lex_file:
#        currentRow = line.split('\t')
#        try:
#            data = [currentRow[0], currentRow[1], currentRow[2], currentRow[3], str(currentRow[4].strip('\r\n'))]
#           c.executemany ('insert or replace into lex values (?, ?, ?, ?, ?)', (data,))
#        except IndexError:
#            pass


#with the one that Julian wrote

blocksize = 100000  # rows buffered per executemany() batch

# Bulk-load the lexicon file: buffer `blocksize` tab-separated rows,
# then insert them with one executemany() + one commit per batch, which
# is far faster than a per-line INSERT/commit.
with open('/Users/.../lexicon.txt', "rb") as lex_file:
    data = []
    line_counter = 0
    for line in lex_file:
        data.append(line.strip().split('\t'))
        line_counter += 1
        if line_counter % blocksize == 0:
            try:
                c.executemany('insert or replace into lex values (?, ?, ?, ?, ?)', data)
                conn.commit()
            except sqlite3.ProgrammingError:
                # A malformed line yields the wrong number of columns;
                # executemany() reports that as ProgrammingError -- the
                # old `except IndexError` could never fire here.
                block_start = line_counter - blocksize + 1
                print('Lex error lines {}-{}'.format(block_start, line_counter))
                conn.rollback()
            finally:
                data = []
    # Flush the trailing partial batch.  Without this, the final
    # line_counter % blocksize rows were silently dropped.
    if data:
        try:
            c.executemany('insert or replace into lex values (?, ?, ?, ?, ?)', data)
            conn.commit()
        except sqlite3.ProgrammingError:
            print('Lex error lines {}-{}'.format(line_counter - len(data) + 1, line_counter))
            conn.rollback()

Bigram表
#I replaced this code to create table x2
#c.execute('''CREATE TABLE x2
#             (freq INT, w1 INT, w2 INT, FOREIGN KEY(w1) REFERENCES lex(wID), FOREIGN KEY(w2) REFERENCES lex(wID))''')

#with the code that Julian suggested
# Bigram table: freq = pair frequency; w1/w2 = word IDs into lex.
# PRIMARY KEY (w1, w2) both states that each pair occurs once and
# creates a persistent index, so pair lookups need no transient index.
c.execute('''CREATE TABLE x2
             (freq INT, w1 INT, w2 INT,
              FOREIGN KEY(w1) REFERENCES lex(wID),
              FOREIGN KEY(w2) REFERENCES lex(wID),
              PRIMARY KEY(w1, w2) )''')

将数据插入Bigram表
#Replaced this code
#with open('/Users/.../x2.txt', "rb") as x2_file:
#    for line in x2_file:
#        currentRow = line.split('\t')
#        try:
#            data = [str(currentRow[0].replace('\x00','').replace('\xff\xfe','')), str(currentRow[1].replace('\x00','')), str(currentRow[2].replace('\x00','').strip('\r\n'))]
#           c.executemany('insert or replace into x2 values (?, ?, ?)', (data,))
#        except IndexError:
#            pass

#with this one suggested by Julian
# Bulk-load the bigram file in batches of `blocksize` rows (defined
# with the lexicon loader above).
with open('/Users/.../x2.txt', "rb") as x2_file:
    data = []
    line_counter = 0
    for line in x2_file:
        # Strip NUL bytes and \xff\xfe sequences -- presumably UTF-16
        # artifacts in the source file; verify its actual encoding.
        data.append(line.strip().replace('\x00', '').replace('\xff\xfe', '').split('\t'))
        line_counter += 1
        if line_counter % blocksize == 0:
            try:
                c.executemany('insert or replace into x2 values (?, ?, ?)', data)
                conn.commit()
            except sqlite3.ProgrammingError:
                # Wrong column count per row surfaces here, not as the
                # IndexError the original caught (which never fired).
                block_start = line_counter - blocksize + 1
                print('x2 error lines {}-{}'.format(block_start, line_counter))
                conn.rollback()
            finally:
                data = []
    # Flush the trailing partial batch.  Without this, the final
    # line_counter % blocksize rows were silently dropped.
    if data:
        try:
            c.executemany('insert or replace into x2 values (?, ?, ?)', data)
            conn.commit()
        except sqlite3.ProgrammingError:
            print('x2 error lines {}-{}'.format(line_counter - len(data) + 1, line_counter))
            conn.rollback()

# Release the database handle once loading is complete.
conn.close()

我希望能够检查数据中是否存在给定的单词对——例如“like new”
当我只指定第一个词时,程序工作正常。
# Look up all bigrams whose first word is 'like'.  The original used
# typographic quotes (“like” / ’’’), which are invalid in both Python
# and SQL; bind the word as a query parameter instead.
cur.execute('''SELECT lex1.w1, lex2.w1 FROM x2
                INNER JOIN lex AS lex1 ON lex1.wID = x2.w1
                INNER JOIN lex AS lex2 ON lex2.wID = x2.w2
                WHERE lex1.w1 = ?''', ('like',))

但是当我想搜索一对单词时,代码会慢得让人痛苦。
# Look up the specific pair ('like', 'new').  Typographic quotes
# (“like”) are invalid Python/SQL syntax; bind both words as query
# parameters.  With PRIMARY KEY(w1, w2) on x2 this resolves via the
# persistent index instead of a per-query transient one.
cur.execute('''SELECT lex1.w1, lex2.w1 FROM x2
                    INNER JOIN lex AS lex1 ON lex1.wID = x2.w1
                    INNER JOIN lex AS lex2 ON lex2.wID = x2.w2
                    WHERE lex1.w1 = ? AND lex2.w1 = ?''', ('like', 'new'))

我不知道我做错了什么。
任何帮助都将不胜感激。

最佳答案

像这样定义x2表。

# Recommended schema: declaring (w1, w2) as the composite PRIMARY KEY
# creates a permanent index on the pair, so pair lookups no longer
# rebuild a transient index on every query.
c.execute('''CREATE TABLE x2
             (freq INT, w1 INT, w2 INT,
              FOREIGN KEY(w1) REFERENCES lex(wID),
              FOREIGN KEY(w2) REFERENCES lex(wID),
              PRIMARY KEY(w1, w2) )''')

除了语义正确之外,这还创建了一个永久索引,可以大大加快查询速度。在不指定(w1,w2)对是表的主键的情况下,每次运行该查询时都必须临时重新创建该索引,这是一个昂贵的操作。
下面这样的代码可以用来重新定义一个表,而不需要重新导入所有内容。
# In-place migration: rebuild x2 with the composite primary key without
# re-importing the source files.
c.execute('''
    create table x2_new (
        freq INT, w1 INT, w2 INT,
        FOREIGN KEY(w1) REFERENCES lex(wID),
        FOREIGN KEY(w2) REFERENCES lex(wID),
        PRIMARY KEY(w1, w2) )
''')
# Copy every row across.
# NOTE(review): this INSERT fails with a constraint error if x2 holds
# duplicate (w1, w2) pairs -- confirm, or use INSERT OR IGNORE.
c.execute('insert into x2_new select * from x2')
# Swap the rebuilt table in under the original name.
c.execute('drop table x2')
c.execute('alter table x2_new rename to x2')
conn.commit()

下面的代码应该加快插入速度。
blocksize = 100000  # rows buffered per executemany() batch

# Batched lexicon import: one executemany() + commit per `blocksize`
# rows, with rollback on a failed batch.
with open('/Users/.../lexicon.txt', "rb") as lex_file:
    data = []
    line_counter = 0
    for line in lex_file:
        data.append(line.strip().split('\t'))
        line_counter += 1
        if line_counter % blocksize == 0:
            try:
                c.executemany('insert or replace into lex values (?, ?, ?, ?, ?)', data)
                conn.commit()
            except sqlite3.ProgrammingError:
                # Short/long rows surface as ProgrammingError from
                # executemany(); the old `except IndexError` never fired.
                block_start = line_counter - blocksize + 1
                print('Lex error lines {}-{}'.format(block_start, line_counter))
                conn.rollback()
            finally:
                data = []
    # Flush the trailing partial batch; otherwise the final
    # line_counter % blocksize rows are silently dropped.
    if data:
        try:
            c.executemany('insert or replace into lex values (?, ?, ?, ?, ?)', data)
            conn.commit()
        except sqlite3.ProgrammingError:
            print('Lex error lines {}-{}'.format(line_counter - len(data) + 1, line_counter))
            conn.rollback()

# Batched bigram import, mirroring the lexicon loader above.
with open('/Users/.../x2.txt', "rb") as x2_file:
    data = []
    line_counter = 0
    for line in x2_file:
        # Strip NUL bytes and \xff\xfe sequences -- presumably UTF-16
        # artifacts in the source file; verify its actual encoding.
        data.append(line.strip().replace('\x00', '').replace('\xff\xfe', '').split('\t'))
        line_counter += 1
        if line_counter % blocksize == 0:
            try:
                c.executemany('insert or replace into x2 values (?, ?, ?)', data)
                conn.commit()
            except sqlite3.ProgrammingError:
                # Wrong column count per row surfaces here, not as the
                # IndexError the original caught (which never fired).
                block_start = line_counter - blocksize + 1
                print('x2 error lines {}-{}'.format(block_start, line_counter))
                conn.rollback()
            finally:
                data = []
    # Flush the trailing partial batch; otherwise the final
    # line_counter % blocksize rows are silently dropped.
    if data:
        try:
            c.executemany('insert or replace into x2 values (?, ?, ?)', data)
            conn.commit()
        except sqlite3.ProgrammingError:
            print('x2 error lines {}-{}'.format(line_counter - len(data) + 1, line_counter))
            conn.rollback()

08-05 16:26