Commit cf58497b authored by Ngan Thi Dong

add UniRep model

parent 1ada39ff
"""
Utilities for data processing.
"""
import tensorflow as tf
import numpy as np
import os
"""
File formatting note.
Data should be preprocessed as a sequence of comma-seperated ints with
sequences /n seperated
"""
# Lookup tables
aa_to_int = {
'M':1,
'R':2,
'H':3,
'K':4,
'D':5,
'E':6,
'S':7,
'T':8,
'N':9,
'Q':10,
'C':11,
'U':12,
'G':13,
'P':14,
'A':15,
'V':16,
'I':17,
'F':18,
'Y':19,
'W':20,
'L':21,
'O':22, #Pyrrolysine
'X':23, # Unknown
'Z':23, # Glutamic acid or Glutamine
'B':23, # Asparagine or aspartic acid
'J':23, # Leucine or isoleucine
'start':24,
'stop':25,
}
int_to_aa = {value:key for key, value in aa_to_int.items()}
def get_aa_to_int():
"""
Get the lookup table (for easy import)
"""
return aa_to_int
def get_int_to_aa():
"""
Get the lookup table (for easy import)
"""
return int_to_aa
# Helper functions
def aa_seq_to_int(s):
"""
Return the int sequence as a list for a given string of amino acids
"""
return [24] + [aa_to_int[a] for a in s] + [25]
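# Example (values from the aa_to_int table above):
# aa_seq_to_int("MRH") -> [24, 1, 2, 3, 25]  i.e. start, M, R, H, stop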
def int_seq_to_aa(s):
"""
Return the amino acid string for a given int sequence
"""
return "".join([int_to_aa[i] for i in s])
def tf_str_len(s):
"""
Returns length of tf.string s
"""
return tf.size(tf.string_split([s],""))
def tf_rank1_tensor_len(t):
"""
Returns the length of a rank 1 tensor t as rank 0 int32
"""
l = tf.reduce_sum(tf.sign(tf.abs(t)), 0)
return tf.cast(l, tf.int32)
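# Example sketch: for a zero-padded rank 1 tensor such as [24, 1, 2, 25, 0, 0],
# the count of nonzero entries, and hence the returned length, is 4.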
def tf_seq_to_tensor(s):
"""
Input a tf.string of comma-separated integers.
Returns a rank 1 int32 tensor the length of the input sequence
"""
return tf.string_to_number(
tf.sparse_tensor_to_dense(tf.string_split([s],","), default_value='0'), out_type=tf.int32
)[0]
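# Example sketch: the preprocessed line "24,1,2,25" becomes the int32 tensor [24, 1, 2, 25].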
def smart_length(length, bucket_bounds=tf.constant([128, 256])):
"""
Hash the given length into the windows given by bucket bounds.
"""
# num_buckets = tf_len(bucket_bounds) + tf.constant(1)
# Subtract length so that smaller bins are negative, then take sign
# Eg: len is 129, sign = [-1,1]
signed = tf.sign(bucket_bounds - length)
# Now make 1 everywhere that length is greater than bound, else 0
greater = tf.sign(tf.abs(signed - tf.constant(1)))
# Now simply sum to count the number of bounds smaller than length
key = tf.cast(tf.reduce_sum(greater), tf.int64)
# This will be between 0 and len(bucket_bounds)
return key
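# Worked example with the default bounds [128, 256] and length 129:
# signed  = sign([128, 256] - 129) = [-1, 1]
# greater = sign(|signed - 1|)     = [1, 0]
# key     = sum(greater)           = 1   (the middle bucket)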
def pad_batch(ds, batch_size, padding=None, padded_shapes=([None])):
"""
Helper for bucketbatchpad - pads with zeros by default
"""
return ds.padded_batch(batch_size,
padded_shapes=padded_shapes,
padding_values=padding
)
def aas_to_int_seq(aa_seq):
int_seq = ""
for aa in aa_seq:
int_seq += str(aa_to_int[aa]) + ","
return str(aa_to_int['start']) + "," + int_seq + str(aa_to_int['stop'])
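# Example: aas_to_int_seq("MR") -> "24,1,2,25" (start, M, R, stop as comma-separated ints)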
# Preprocessing in python
def fasta_to_input_format(source, destination):
# I don't know exactly how to do this in tf, so resorting to python.
# Should go line by line so everything is not loaded into memory
sourcefile = os.path.join(source)
destination = os.path.join(destination)
with open(sourcefile, 'r') as f:
with open(destination, 'w') as dest:
seq = ""
for line in f:
if line[0] == '>' and not seq == "":
dest.write(aas_to_int_seq(seq) + '\n')
seq = ""
elif not line[0] == '>':
seq += line.replace("\n","")
# Write out the last sequence, which the loop above never flushes
if seq != "":
dest.write(aas_to_int_seq(seq) + '\n')
# Real data pipelines
def bucketbatchpad(
batch_size=256,
path_to_data=os.path.join("./data/SwissProt/sprot_ints.fasta"), # Preprocessed - see file formatting note above
compressed="", # See tf.contrib.data.TextLineDataset init args
bounds=[128,256], # Default buckets: <128, 128-256, >256
# Unclear exactly what this does; should probably equal batch_size
window_size=256, # NOT a tensor
padding=None, # Use default padding of zero, otherwise see Dataset docs
shuffle_buffer=None, # None or the size of the buffer to shuffle with
pad_shape=([None]),
repeat=1,
filt=None
):
"""
Streams data from path_to_data that is correctly preprocessed.
Divides into buckets given by bounds and pads to full length.
Returns a dataset which will return a padded batch of batchsize
with iteration.
"""
batch_size=tf.constant(batch_size, tf.int64)
bounds=tf.constant(bounds)
window_size=tf.constant(window_size, tf.int64)
path_to_data = os.path.join(path_to_data)
# Parse strings to tensors
dataset = tf.data.TextLineDataset(path_to_data).map(tf_seq_to_tensor)
if filt is not None:
dataset = dataset.filter(filt)
if shuffle_buffer:
# Stream elements uniformly randomly from a buffer
dataset = dataset.shuffle(buffer_size=shuffle_buffer)
# Apply a repeat. Because this is after the shuffle, all elements of the dataset should be seen before repeat.
# See https://stackoverflow.com/questions/44132307/tf-contrib-data-dataset-repeat-with-shuffle-notice-epoch-end-mixed-epochs
dataset = dataset.repeat(count=repeat)
# Apply grouping to bucket and pad
grouped_dataset = dataset.apply(tf.data.experimental.group_by_window(
key_func=lambda seq: smart_length(tf_rank1_tensor_len(seq), bucket_bounds=bounds), # choose a bucket
reduce_func=lambda key, ds: pad_batch(ds, batch_size, padding=padding, padded_shapes=pad_shape), # apply reduce function to pad
window_size=window_size))
return grouped_dataset
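# Usage sketch (TF1-style; assumes a preprocessed int file at the default path):
# batch_op = bucketbatchpad(batch_size=32, shuffle_buffer=1000).make_one_shot_iterator().get_next()
# with tf.Session() as sess:
#     padded_batch = sess.run(batch_op)  # int32 array, shape [<=32, longest_seq_in_window]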
def shufflebatch(
batch_size=256,
shuffle_buffer=None,
repeat=1,
path_to_data="./data/SwissProt/sprot_ints.fasta"
):
"""
Draws from an (optionally shuffled) dataset, repeats dataset repeat times,
and serves batches of the specified size.
"""
path_to_data = os.path.join(path_to_data)
# Parse strings to tensors
dataset = tf.data.TextLineDataset(path_to_data).map(tf_seq_to_tensor)
if shuffle_buffer:
# Stream elements uniformly randomly from a buffer
dataset = dataset.shuffle(buffer_size=shuffle_buffer)
# Apply a repeat. Because this is after the shuffle, all elements of the dataset should be seen before repeat.
# See https://stackoverflow.com/questions/44132307/tf-contrib-data-dataset-repeat-with-shuffle-notice-epoch-end-mixed-epochs
dataset = dataset.repeat(count=repeat)
dataset = dataset.batch(batch_size)
return dataset
"""
The trained 1900-dimensional mLSTM babbler.
"""
import tensorflow as tf
import numpy as np
import pandas as pd
import sys
sys.path.append('../')
from UniRep.data_utils import aa_seq_to_int, int_to_aa, bucketbatchpad
import os
# Helpers
def tf_get_shape(tensor):
static_shape = tensor.shape.as_list()
dynamic_shape = tf.unstack(tf.shape(tensor))
dims = [s[1] if s[0] is None else s[0]
for s in zip(static_shape, dynamic_shape)]
return dims
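# Example sketch: for a placeholder with static shape [None, 10] this returns
# [<dynamic batch-size tensor>, 10], i.e. static dims where known, dynamic tensors otherwise.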
def sample_with_temp(logits, t):
"""
Takes a temperature between 0 and 1 -> 0 is most conservative, 1 is most liberal. Samples from the tempered softmax.
"""
t_adjusted = logits / t # broadcast temperature normalization
softed = tf.nn.softmax(t_adjusted)
# Make a categorical distribution from the softmax and sample
return tf.distributions.Categorical(probs=softed).sample()
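# Usage sketch (logits is a hypothetical [batch, vocab] tensor):
# next_int = sample_with_temp(logits, t=0.5)  # samples indices from softmax(logits / 0.5)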
def initialize_uninitialized(sess):
"""
from https://stackoverflow.com/questions/35164529/in-tensorflow-is-there-any-way-to-just-initialize-uninitialised-variables
"""
global_vars = tf.global_variables()
is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])
not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]
if len(not_initialized_vars):
sess.run(tf.variables_initializer(not_initialized_vars))
# Setup to initialize from the correctly named model files.
class mLSTMCell1900(tf.nn.rnn_cell.RNNCell):
def __init__(self,
num_units,
model_path="./",
wn=True,
scope='mlstm',
var_device='cpu:0',
):
# Really not sure if I should reuse here
super(mLSTMCell1900, self).__init__()
self._num_units = num_units
self._model_path = model_path
self._wn = wn
self._scope = scope
self._var_device = var_device
@property
def state_size(self):
# The state is a tuple of c and h
return (self._num_units, self._num_units)
@property
def output_size(self):
# The output is h
return (self._num_units)
def zero_state(self, batch_size, dtype):
c = tf.zeros([batch_size, self._num_units], dtype=dtype)
h = tf.zeros([batch_size, self._num_units], dtype=dtype)
return (c, h)
def call(self, inputs, state):
# Inputs will be a [batch_size, input_dim] tensor.
# Eg, input_dim for a 10-D embedding is 10
nin = inputs.get_shape()[1].value
# Unpack the state tuple
c_prev, h_prev = state
with tf.variable_scope(self._scope):
wx_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_wx:0.npy"))
wh_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_wh:0.npy"))
wmx_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_wmx:0.npy"))
wmh_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_wmh:0.npy"))
b_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_b:0.npy"))
gx_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_gx:0.npy"))
gh_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_gh:0.npy"))
gmx_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_gmx:0.npy"))
gmh_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_gmh:0.npy"))
wx = tf.get_variable(
"wx", initializer=wx_init)
wh = tf.get_variable(
"wh", initializer=wh_init)
wmx = tf.get_variable(
"wmx", initializer=wmx_init)
wmh = tf.get_variable(
"wmh", initializer=wmh_init)
b = tf.get_variable(
"b", initializer=b_init)
if self._wn:
gx = tf.get_variable(
"gx", initializer=gx_init)
gh = tf.get_variable(
"gh", initializer=gh_init)
gmx = tf.get_variable(
"gmx", initializer=gmx_init)
gmh = tf.get_variable(
"gmh", initializer=gmh_init)
if self._wn:
wx = tf.nn.l2_normalize(wx, dim=0) * gx
wh = tf.nn.l2_normalize(wh, dim=0) * gh
wmx = tf.nn.l2_normalize(wmx, dim=0) * gmx
wmh = tf.nn.l2_normalize(wmh, dim=0) * gmh
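# Multiplicative LSTM update implemented below (elementwise *, matmul @):
# m = (x @ wmx) * (h_prev @ wmh)                  multiplicative intermediate state
# z = x @ wx + m @ wh + b                         pre-activations, split into gates i, f, o, u
# c = sigmoid(f) * c_prev + sigmoid(i) * tanh(u)  new cell state
# h = sigmoid(o) * tanh(c)                        new hidden state / output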
m = tf.matmul(inputs, wmx) * tf.matmul(h_prev, wmh)
z = tf.matmul(inputs, wx) + tf.matmul(m, wh) + b
i, f, o, u = tf.split(z, 4, 1)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f * c_prev + i * u
h = o * tf.tanh(c)
return h, (c, h)
class mLSTMCell(tf.nn.rnn_cell.RNNCell):
def __init__(self,
num_units,
wx_init=tf.orthogonal_initializer(),
wh_init=tf.orthogonal_initializer(),
wmx_init=tf.orthogonal_initializer(),
wmh_init=tf.orthogonal_initializer(),
b_init=tf.orthogonal_initializer(),
gx_init=tf.ones_initializer(),
gh_init=tf.ones_initializer(),
gmx_init=tf.ones_initializer(),
gmh_init=tf.ones_initializer(),
wn=True,
scope='mlstm',
var_device='cpu:0',
):
# Really not sure if I should reuse here
super(mLSTMCell, self).__init__()
self._num_units = num_units
self._wn = wn
self._scope = scope
self._var_device = var_device
self._wx_init = wx_init
self._wh_init = wh_init
self._wmx_init = wmx_init
self._wmh_init = wmh_init
self._b_init = b_init
self._gx_init = gx_init
self._gh_init = gh_init
self._gmx_init = gmx_init
self._gmh_init = gmh_init
@property
def state_size(self):
# The state is a tuple of c and h
return (self._num_units, self._num_units)
@property
def output_size(self):
# The output is h
return (self._num_units)
def zero_state(self, batch_size, dtype):
c = tf.zeros([batch_size, self._num_units], dtype=dtype)
h = tf.zeros([batch_size, self._num_units], dtype=dtype)
return (c, h)
def call(self, inputs, state):
# Inputs will be a [batch_size, input_dim] tensor.
# Eg, input_dim for a 10-D embedding is 10
nin = inputs.get_shape()[1].value
# Unpack the state tuple
c_prev, h_prev = state
with tf.variable_scope(self._scope):
wx = tf.get_variable(
"wx", initializer=self._wx_init)
wh = tf.get_variable(
"wh", initializer=self._wh_init)
wmx = tf.get_variable(
"wmx", initializer=self._wmx_init)
wmh = tf.get_variable(
"wmh", initializer=self._wmh_init)
b = tf.get_variable(
"b", initializer=self._b_init)
if self._wn:
gx = tf.get_variable(
"gx", initializer=self._gx_init)
gh = tf.get_variable(
"gh", initializer=self._gh_init)
gmx = tf.get_variable(
"gmx", initializer=self._gmx_init)
gmh = tf.get_variable(
"gmh", initializer=self._gmh_init)
if self._wn:
wx = tf.nn.l2_normalize(wx, dim=0) * gx
wh = tf.nn.l2_normalize(wh, dim=0) * gh
wmx = tf.nn.l2_normalize(wmx, dim=0) * gmx
wmh = tf.nn.l2_normalize(wmh, dim=0) * gmh
m = tf.matmul(inputs, wmx) * tf.matmul(h_prev, wmh)
z = tf.matmul(inputs, wx) + tf.matmul(m, wh) + b
i, f, o, u = tf.split(z, 4, 1)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f * c_prev + i * u
h = o * tf.tanh(c)
return h, (c, h)
class mLSTMCellStackNPY(tf.nn.rnn_cell.RNNCell):
def __init__(self,
num_units=256,
num_layers=4,
dropout=None,
res_connect=False,
wn=True,
scope='mlstm_stack',
var_device='cpu:0',
model_path="./"
):
# Really not sure if I should reuse here
super(mLSTMCellStackNPY, self).__init__()
self._model_path=model_path
self._num_units = num_units
self._num_layers = num_layers
self._dropout = dropout
self._res_connect = res_connect
self._wn = wn
self._scope = scope
self._var_device = var_device
bs = "rnn_mlstm_stack_mlstm_stack" # base scope see weight file names
join = lambda x: os.path.join(self._model_path, x)
layers = [mLSTMCell(
num_units=self._num_units,
wn=self._wn,
scope=self._scope + str(i),
var_device=self._var_device,
wx_init=np.load(join(bs + "{0}_mlstm_stack{1}_wx:0.npy".format(i,i))),
wh_init=np.load(join(bs + "{0}_mlstm_stack{1}_wh:0.npy".format(i,i))),
wmx_init=np.load(join(bs + "{0}_mlstm_stack{1}_wmx:0.npy".format(i,i))),
wmh_init=np.load(join(bs + "{0}_mlstm_stack{1}_wmh:0.npy".format(i,i))),
b_init=np.load(join(bs + "{0}_mlstm_stack{1}_b:0.npy".format(i,i))),
gx_init=np.load(join(bs + "{0}_mlstm_stack{1}_gx:0.npy".format(i,i))),
gh_init=np.load(join(bs + "{0}_mlstm_stack{1}_gh:0.npy".format(i,i))),
gmx_init=np.load(join(bs + "{0}_mlstm_stack{1}_gmx:0.npy".format(i,i))),
gmh_init=np.load(join(bs + "{0}_mlstm_stack{1}_gmh:0.npy".format(i,i)))
) for i in range(self._num_layers)]
if self._dropout:
layers = [
tf.contrib.rnn.DropoutWrapper(
layer, output_keep_prob=1-self._dropout) for layer in layers[:-1]] + layers[-1:]
self._layers = layers
@property
def state_size(self):
# The state is a tuple of c and h
return (
tuple(self._num_units for _ in range(self._num_layers)),
tuple(self._num_units for _ in range(self._num_layers))
)
@property
def output_size(self):
# The output is h
return (self._num_units)
def zero_state(self, batch_size, dtype):
c_stack = tuple(tf.zeros([batch_size, self._num_units], dtype=dtype) for _ in range(self._num_layers))
h_stack = tuple(tf.zeros([batch_size, self._num_units], dtype=dtype) for _ in range(self._num_layers))
return (c_stack, h_stack)
def call(self, inputs, state):
# Inputs will be a [batch_size, input_dim] tensor.
# Eg, input_dim for a 10-D embedding is 10
# Unpack the state tuple
c_prev, h_prev = state
new_outputs = []
new_cs = []
new_hs = []
for i, layer in enumerate(self._layers):
if i == 0:
h, (c,h_state) = layer(inputs, (c_prev[i],h_prev[i]))
else:
h, (c,h_state) = layer(new_outputs[-1], (c_prev[i],h_prev[i]))
new_outputs.append(h)
new_cs.append(c)
new_hs.append(h_state)
if self._res_connect:
# Make sure number of layers does not affect the scale of the output
scale_factor = tf.constant(1 / float(self._num_layers))
final_output = tf.scalar_mul(scale_factor,tf.add_n(new_outputs))
else:
final_output = new_outputs[-1]
return final_output, (tuple(new_cs), tuple(new_hs))
class babbler1900():
def __init__(self,
model_path="./pbab_weights",
batch_size=256
):
self._rnn_size = 1900
self._vocab_size = 26
self._embed_dim = 10
self._wn = True
self._shuffle_buffer = 10000
self._model_path = model_path
self._batch_size = batch_size
self._batch_size_placeholder = tf.placeholder(tf.int32, shape=[], name="batch_size")
self._minibatch_x_placeholder = tf.placeholder(
tf.int32, shape=[None, None], name="minibatch_x")
self._initial_state_placeholder = (
tf.placeholder(tf.float32, shape=[None, self._rnn_size]),
tf.placeholder(tf.float32, shape=[None, self._rnn_size])
)
self._minibatch_y_placeholder = tf.placeholder(
tf.int32, shape=[None, None], name="minibatch_y")
# Batch-size-dimensional placeholder which gives the
# lengths of the input sequence batch. Used to index into
# the final_hidden output and select the hidden state at
# the position just before the stop token for the graph operation.
self._seq_length_placeholder = tf.placeholder(
tf.int32, shape=[None], name="seq_len")
self._temp_placeholder = tf.placeholder(tf.float32, shape=[], name="temp")
rnn = mLSTMCell1900(self._rnn_size,
model_path=model_path,
wn=self._wn)
zero_state = rnn.zero_state(self._batch_size, tf.float32)
single_zero = rnn.zero_state(1, tf.float32)
mask = tf.sign(self._minibatch_y_placeholder) # 1 for nonpad, zero for pad
inverse_mask = 1 - mask # 0 for nonpad, 1 for pad
total_padded = tf.reduce_sum(inverse_mask)
pad_adjusted_targets = (self._minibatch_y_placeholder - 1) + inverse_mask
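# Targets are shifted down by 1 (vocab ints 1..25 -> logit indices 0..24); adding
# inverse_mask keeps padded positions (0 - 1 + 1) at 0 so they remain valid indices.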
embed_matrix = tf.get_variable(
"embed_matrix", dtype=tf.float32, initializer=np.load(os.path.join(self._model_path, "embed_matrix:0.npy"))
)
embed_cell = tf.nn.embedding_lookup(embed_matrix, self._minibatch_x_placeholder)
self._output, self._final_state = tf.nn.dynamic_rnn(
rnn,
embed_cell,
initial_state=self._initial_state_placeholder,
swap_memory=True,
parallel_iterations=1
)