# This is Transformer
- If you want to import the Transformer model, use this import:
```
# import the transformer model
>>> from GorparTansformer.model import build_transformer
# how to use this transformer model
>>> build_transformer(
vocab_src_len=vocabulary_source_length, # vocabulary size of the source language (from the source tokenizer)
vocab_tgt_len=vocabulary_target_length, # same for the target language
src_seq_len=config["seq_len"], # maximum sequence length of a source sentence, e.g. 350
tgt_seq_len=config['seq_len'], # maximum sequence length of a target sentence, same as the source length
d_model=config['d_model'] # model embedding dimension, e.g. 512
)
```
- If you want to import the Tensor dataset class, which converts raw data into tensors:
```
# import the Tensor dataset Function
>>> from GorparTansformer.dataset import BilingualDataset
# how to use this Tensor dataset, which converts the raw data into tensors
>>> BilingualDataset(
ds=train_dataset_raw, # raw dataset, e.g. 'Ram eats mango'
tokenizer_src=tokenizer_source, # source language tokenizer
tokenizer_tgt=tokenizer_target, # target language tokenizer
src_lang=config['lang_src'], # source language, e.g. English
tgt_lang=config['lang_tgt'], # target language, e.g. Hindi
seq_len=config['seq_len']) # sequence length, e.g. 350
```
Raw metadata (PyPI package record):
{
"_id": null,
"home_page": null,
"name": "PTransformer",
"maintainer": null,
"docs_url": null,
"requires_python": null,
"maintainer_email": null,
"keywords": "python, transformer, chat-GPT Transformer",
"author": "ProgramerSalar",
"author_email": "<manishkumar60708090@gmail.com>",
"download_url": "https://files.pythonhosted.org/packages/f9/82/a78a2345a47ada51fc3b9e2f2f0f0b7c75d2ecf5f31aba9a5c23f8943566/ptransformer-0.0.1.2.tar.gz",
"platform": null,
"description": "\r\n# This is Transformer \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n- if you are import the model of the Transformer then used to this import \r\n\r\n```\r\n\r\n# import the transformer model \r\n\r\n>>> from GorparTansformer.model import build_transformer\r\n\r\n\r\n\r\n# how to used this transformer model \r\n\r\n>>> build_transformer(\r\n\r\n vocab_src_len=vocabulary_source_length, # vocabulary source length of sentence like tokeinzer source length of \r\n\r\n vocab_tgt_len=vocabulary_target_length, # same for the target language \r\n\r\n src_seq_len=config[\"seq_len\"], # source language length of you sentence like 350 \r\n\r\n tgt_seq_len=config['seq_len'], # target language length of you sentence same as source length\r\n\r\n d_model=config['d_model'] # dimension model your language like 512\r\n\r\n)\r\n\r\n```\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n- if you import the Tensor dataset function, which is convert the tensor data from raw data \r\n\r\n```\r\n\r\n# import the Tensor dataset Function\r\n\r\n>>> from GorparTansformer.dataset import BilingualDataset\r\n\r\n\r\n\r\n# how to used this Tensor dataset which is convert to the Tensor of the row data \r\n\r\n>>> BilingualDataset(\r\n\r\n ds=train_dataset_raw, # raw dataset like='Ram eats mango'\r\n\r\n tokenizer_src=tokenizer_source, # source language tokenizer \r\n\r\n tokenizer_tgt=tokinzer_target, # target language tokenizer\r\n\r\n src_lang=config['lang_src'], # source language like engish\r\n\r\n tgt_lang=config['lang_tgt'], # target language like Hindi\r\n\r\n seq_len=config['seq_len']) # sequence length like 350\r\n\r\n```\r\n\r\n\r\n\r\n\r\n\r\n",
"bugtrack_url": null,
"license": null,
"summary": "This is Ai Transformer",
"version": "0.0.1.2",
"project_urls": null,
"split_keywords": [
"python",
" transformer",
" chat-gpt transformer"
],
"urls": [
{
"comment_text": "",
"digests": {
"blake2b_256": "4d04811567a2b87c80735bfed89275ddaf364580be974923c2ea9b9cf029f253",
"md5": "f2d310bae7c05351c2ccda164a7c1e82",
"sha256": "b79f7619a419488144c76ba7c529d3b96f56e0bf8ef254e457f30ca8546aa281"
},
"downloads": -1,
"filename": "PTransformer-0.0.1.2-py3-none-any.whl",
"has_sig": false,
"md5_digest": "f2d310bae7c05351c2ccda164a7c1e82",
"packagetype": "bdist_wheel",
"python_version": "py3",
"requires_python": null,
"size": 5878,
"upload_time": "2024-09-08T14:26:41",
"upload_time_iso_8601": "2024-09-08T14:26:41.038699Z",
"url": "https://files.pythonhosted.org/packages/4d/04/811567a2b87c80735bfed89275ddaf364580be974923c2ea9b9cf029f253/PTransformer-0.0.1.2-py3-none-any.whl",
"yanked": false,
"yanked_reason": null
},
{
"comment_text": "",
"digests": {
"blake2b_256": "f982a78a2345a47ada51fc3b9e2f2f0f0b7c75d2ecf5f31aba9a5c23f8943566",
"md5": "aab3b9a875b1025ef97f2c289751cc6a",
"sha256": "146c2c92a187b16a94c8ecade8ae6514ebcb0926f8a6f172a97826b95ebfa3b8"
},
"downloads": -1,
"filename": "ptransformer-0.0.1.2.tar.gz",
"has_sig": false,
"md5_digest": "aab3b9a875b1025ef97f2c289751cc6a",
"packagetype": "sdist",
"python_version": "source",
"requires_python": null,
"size": 5643,
"upload_time": "2024-09-08T14:26:43",
"upload_time_iso_8601": "2024-09-08T14:26:43.477554Z",
"url": "https://files.pythonhosted.org/packages/f9/82/a78a2345a47ada51fc3b9e2f2f0f0b7c75d2ecf5f31aba9a5c23f8943566/ptransformer-0.0.1.2.tar.gz",
"yanked": false,
"yanked_reason": null
}
],
"upload_time": "2024-09-08 14:26:43",
"github": false,
"gitlab": false,
"bitbucket": false,
"codeberg": false,
"lcname": "ptransformer"
}