diff --git a/pgml-extension/examples/finetune.sql b/pgml-extension/examples/finetune.sql new file mode 100644 index 000000000..ca52acfea --- /dev/null +++ b/pgml-extension/examples/finetune.sql @@ -0,0 +1,90 @@ +-- Exit on error (psql) +\set ON_ERROR_STOP true +\timing on + + +SELECT pgml.load_dataset('kde4', kwargs => '{"lang1": "en", "lang2": "es"}'); +CREATE OR REPLACE VIEW kde4_en_to_es AS +SELECT translation->>'en' AS "en", translation->>'es' AS "es" +FROM pgml.kde4 +LIMIT 10; +SELECT pgml.tune( + 'Translate English to Spanish', + task => 'translation', + relation_name => 'kde4_en_to_es', + y_column_name => 'es', -- translate into spanish + model_name => 'Helsinki-NLP/opus-mt-en-es', + hyperparams => '{ + "learning_rate": 2e-5, + "per_device_train_batch_size": 16, + "per_device_eval_batch_size": 16, + "num_train_epochs": 1, + "weight_decay": 0.01, + "max_length": 128 + }', + test_size => 0.5, + test_sampling => 'last' +); + +SELECT pgml.load_dataset('imdb'); +SELECT pgml.tune( + 'IMDB Review Sentiment', + task => 'text-classification', + relation_name => 'pgml.imdb', + y_column_name => 'label', + model_name => 'distilbert-base-uncased', + hyperparams => '{ + "learning_rate": 2e-5, + "per_device_train_batch_size": 16, + "per_device_eval_batch_size": 16, + "num_train_epochs": 1, + "weight_decay": 0.01 + }', + test_size => 0.5, + test_sampling => 'last' +); +SELECT pgml.predict('IMDB Review Sentiment', 'I love SQL'); + +SELECT pgml.load_dataset('squad_v2'); +SELECT pgml.tune( + 'SQuAD Q&A v2', + 'question-answering', + 'pgml.squad_v2', + 'answers', + 'deepset/roberta-base-squad2', + hyperparams => '{ + "evaluation_strategy": "epoch", + "learning_rate": 2e-5, + "per_device_train_batch_size": 16, + "per_device_eval_batch_size": 16, + "num_train_epochs": 1, + "weight_decay": 0.01, + "max_length": 384, + "stride": 128 + }', + test_size => 11873, + test_sampling => 'last' +); + + +SELECT pgml.load_dataset('billsum', kwargs => '{"split": "ca_test"}'); +CREATE OR 
REPLACE VIEW billsum_training_data +AS SELECT title || E'\n' || text AS text, summary FROM pgml.billsum; +SELECT pgml.tune( + 'Legal Summarization', + task => 'summarization', + relation_name => 'billsum_training_data', + y_column_name => 'summary', + model_name => 'sshleifer/distilbart-xsum-12-1', + hyperparams => '{ + "learning_rate": 2e-5, + "per_device_train_batch_size": 2, + "per_device_eval_batch_size": 2, + "num_train_epochs": 1, + "weight_decay": 0.01, + "max_input_length": 1024, + "max_summary_length": 128 + }', + test_size => 0.01, + test_sampling => 'last' +); diff --git a/pgml-extension/examples/transformers.sql b/pgml-extension/examples/transformers.sql index 36f019350..e7fabbb7d 100644 --- a/pgml-extension/examples/transformers.sql +++ b/pgml-extension/examples/transformers.sql @@ -32,89 +32,60 @@ SELECT pgml.transform( 'Dominic Cobb is the foremost practitioner of the artistic science of extraction, inserting oneself into a subject''s dreams to obtain hidden information without the subject knowing, a concept taught to him by his professor father-in-law, Dr. Stephen Miles. Dom''s associates are Miles'' former students, who Dom requires as he has given up being the dream architect for reasons he won''t disclose. Dom''s primary associate, Arthur, believes it has something to do with Dom''s deceased wife, Mal, who often figures prominently and violently in those dreams, or Dom''s want to "go home" (get back to his own reality, which includes two young children). Dom''s work is generally in corporate espionage. As the subjects don''t want the information to get into the wrong hands, the clients have zero tolerance for failure. Dom is also a wanted man, as many of his past subjects have learned what Dom has done to them. One of those subjects, Mr. Saito, offers Dom a job he can''t refuse: to take the concept one step further into inception, namely planting thoughts into the subject''s dreams without them knowing. 
Inception can fundamentally alter that person as a being. Saito''s target is Robert Michael Fischer, the heir to an energy business empire, which has the potential to rule the world if continued on the current trajectory. Beyond the complex logistics of the dream architecture of the case and some unknowns concerning Fischer, the biggest obstacles in success for the team become worrying about one aspect of inception which Cobb fails to disclose to the other team members prior to the job, and Cobb''s newest associate Ariadne''s belief that Cobb''s own subconscious, especially as it relates to Mal, may be taking over what happens in the dreams.' ] ); +SELECT pgml.transform( + inputs => ARRAY[ + 'I love how amazingly simple ML has become!', + 'I hate doing mundane and thankless tasks. ☹️' + ], + task => '{"task": "text-classification", + "model": "finiteautomata/bertweet-base-sentiment-analysis" + }'::JSONB +) AS positivity; -SELECT pgml.load_dataset('kde4', kwargs => '{"lang1": "en", "lang2": "es"}'); -CREATE OR REPLACE VIEW kde4_en_to_es AS -SELECT translation->>'en' AS "en", translation->>'es' AS "es" -FROM pgml.kde4 -LIMIT 10; -SELECT pgml.tune( - 'Translate English to Spanish', - task => 'translation', - relation_name => 'kde4_en_to_es', - y_column_name => 'es', -- translate into spanish - model_name => 'Helsinki-NLP/opus-mt-en-es', - hyperparams => '{ - "learning_rate": 2e-5, - "per_device_train_batch_size": 16, - "per_device_eval_batch_size": 16, - "num_train_epochs": 1, - "weight_decay": 0.01, - "max_length": 128 - }', - test_size => 0.5, - test_sampling => 'last' -); +SELECT pgml.transform( + task => 'text-classification', + inputs => ARRAY[ + 'I love how amazingly simple ML has become!', + 'I hate doing mundane and thankless tasks. 
☹️' + ] +) AS positivity; + +SELECT pgml.transform( + inputs => ARRAY[ + 'Stocks rallied and the British pound gained.', + 'Stocks making the biggest moves midday: Nvidia, Palantir and more' + ], + task => '{"task": "text-classification", + "model": "ProsusAI/finbert" + }'::JSONB +) AS market_sentiment; -SELECT pgml.load_dataset('imdb'); -SELECT pgml.tune( - 'IMDB Review Sentiment', - task => 'text-classification', - relation_name => 'pgml.imdb', - y_column_name => 'label', - model_name => 'distilbert-base-uncased', - hyperparams => '{ - "learning_rate": 2e-5, - "per_device_train_batch_size": 16, - "per_device_eval_batch_size": 16, - "num_train_epochs": 1, - "weight_decay": 0.01 - }', - test_size => 0.5, - test_sampling => 'last' +SELECT pgml.transform( + inputs => ARRAY[ + 'I have a problem with my iphone that needs to be resolved asap!!' + ], + task => '{"task": "zero-shot-classification", + "model": "roberta-large-mnli" + }'::JSONB, + args => '{"candidate_labels": ["urgent", "not urgent", "phone", "tablet", "computer"] + }'::JSONB +) AS zero_shot; + +SELECT pgml.transform( + inputs => ARRAY[ + 'Hugging Face is a French company based in New York City.' + ], + task => 'token-classification' ); -SELECT pgml.predict('IMDB Review Sentiment', 'I love SQL'); -SELECT pgml.load_dataset('squad_v2'); -SELECT pgml.tune( - 'SQuAD Q&A v2', +SELECT pgml.transform( 'question-answering', - 'pgml.squad_v2', - 'answers', - 'deepset/roberta-base-squad2', - hyperparams => '{ - "evaluation_strategy": "epoch", - "learning_rate": 2e-5, - "per_device_train_batch_size": 16, - "per_device_eval_batch_size": 16, - "num_train_epochs": 1, - "weight_decay": 0.01, - "max_length": 384, - "stride": 128 - }', - test_size => 11873, - test_sampling => 'last' -); + inputs => ARRAY[ + '{ + "question": "Am I dreaming?", + "context": "I got a good nights sleep last night and started a simple tutorial over my cup of morning coffee. 
The capabilities seem unreal, compared to what I came to expect from the simple SQL standard I studied so long ago. The answer is staring me in the face, and I feel the uncanny call from beyond the screen to check the results." + }' + ] +) AS answer; -SELECT pgml.load_dataset('billsum', kwargs => '{"split": "ca_test"}'); -CREATE OR REPLACE VIEW billsum_training_data -AS SELECT title || '\n' || text AS text, summary FROM pgml.billsum; -SELECT pgml.tune( - 'Legal Summarization', - task => 'summarization', - relation_name => 'billsum_training_data', - y_column_name => 'summary', - model_name => 'sshleifer/distilbart-xsum-12-1', - hyperparams => '{ - "learning_rate": 2e-5, - "per_device_train_batch_size": 2, - "per_device_eval_batch_size": 2, - "num_train_epochs": 1, - "weight_decay": 0.01, - "max_input_length": 1024, - "max_summary_length": 128 - }', - test_size => 0.01, - test_sampling => 'last' -); diff --git a/pgml-extension/src/bindings/transformers.py b/pgml-extension/src/bindings/transformers.py index 43040f42a..da109b9f2 100644 --- a/pgml-extension/src/bindings/transformers.py +++ b/pgml-extension/src/bindings/transformers.py @@ -3,7 +3,7 @@ import math import shutil import time - +import numpy as np import datasets from rouge import Rouge @@ -40,6 +40,12 @@ __cache_transformer_by_model_id = {} __cache_sentence_transformer_by_name = {} +class NumpyJSONEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.float32): + return float(obj) + return super().default(obj) + def transform(task, args, inputs): task = json.loads(task) args = json.loads(args) @@ -50,7 +56,7 @@ def transform(task, args, inputs): if pipe.task == "question-answering": inputs = [json.loads(input) for input in inputs] - return json.dumps(pipe(inputs, **args)) + return json.dumps(pipe(inputs, **args), cls = NumpyJSONEncoder) def embed(transformer, text, kwargs): kwargs = json.loads(kwargs) @@ -101,7 +107,7 @@ def tokenize_summarization(tokenizer, max_length, x, y): return 
datasets.Dataset.from_dict(encoding.data) def tokenize_text_generation(tokenizer, max_length, y): - encoding = tokenizer(y, max_length=max_length) + encoding = tokenizer(y, max_length=max_length, truncation=True, padding="max_length") return datasets.Dataset.from_dict(encoding.data) def tokenize_question_answering(tokenizer, max_length, x, y): diff --git a/pgml-extension/tests/test.sql b/pgml-extension/tests/test.sql index db89f25e6..ed14c510d 100644 --- a/pgml-extension/tests/test.sql +++ b/pgml-extension/tests/test.sql @@ -27,3 +27,4 @@ SELECT pgml.load_dataset('wine'); \i examples/multi_classification.sql \i examples/regression.sql \i examples/vectors.sql +