I just tried out the new Vision Transformer (ViT) on TensorFlow Hub by adding two lines of code in the Custom component. Below is the full code, but the only lines I added were:
import tensorflow_hub as hub
and
input_ = hub.KerasLayer("https://tfhub.dev/sayakpaul/vit_s16_classification/1")(input_)
NOTE: I had to install tensorflow_hub (pip install tensorflow_hub) in the same environment that PerceptiLabs is installed in.
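If you want to sanity-check that the Hub model loads in that environment before wiring it into the Custom component, a quick standalone sketch like the one below should work. I'm assuming here that this model takes 224x224 RGB images scaled to [-1, 1] and returns 1000-way ImageNet logits; check the model page on TF Hub to confirm.

# Standalone check (outside PerceptiLabs) that the ViT layer loads and
# produces the expected output shape.
import tensorflow as tf
import tensorflow_hub as hub

vit = hub.KerasLayer("https://tfhub.dev/sayakpaul/vit_s16_classification/1")

# Dummy batch of one image; assumes 224x224 RGB inputs scaled to [-1, 1].
dummy = tf.random.uniform((1, 224, 224, 3), minval=-1.0, maxval=1.0)
logits = vit(dummy)
print(logits.shape)  # expected: (1, 1000)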
import tensorflow_hub as hub

class LayerCustom_LayerCustom_1Keras(tf.keras.layers.Layer, PerceptiLabsVisualizer):
    def call(self, inputs, training=True):
        """ Takes the input tensor and runs it through the ViT model from TF Hub """
        input_ = inputs['input']
        # Load the pretrained ViT S/16 classifier from TensorFlow Hub and apply it to the input
        input_ = hub.KerasLayer("https://tfhub.dev/sayakpaul/vit_s16_classification/1")(input_)
        output = preview = input_
        self._outputs = {
            'output': output,
            'preview': output,
        }
        return self._outputs

    def get_config(self):
        """Any variables belonging to this layer that should be rendered in the frontend.

        Returns:
            A dictionary with tensor names for keys and picklable objects for values.
        """
        return {}

    @property
    def visualized_trainables(self):
        """ Returns two tf.Variables (weights, biases) to be visualized in the frontend """
        return tf.constant(0), tf.constant(0)


class LayerCustom_LayerCustom_1(Tf2xLayer):
    def __init__(self):
        super().__init__(
            keras_class=LayerCustom_LayerCustom_1Keras
        )