#ai tools
export def "ai help" [] {
print (
echo ["This set of tools need a few dependencies installed:"
"ffmpeg, whisper, yt-dlp, gcalcli."
""
"METHODS"
"- chat_gpt"
"- askai"
"- ai audio2text"
"- ai video2text"
"- ai screen2text"
"- ai audio2summary"
"- ai transcription-summary"
"- ai yt-summary"
"- ai media-summary"
"- ai generate-subtitles"
"- ai git-push"
"- ai google_search-summary"
"- dall_e"
"- askdalle"
"- ai tts"
"- tts"
"- google_ai"
"- gcal ai"
"- ai trans"
"- ai google_search-summary"
"- ai trans-subs"
"- claude_ai"
]
| str join "\n"
| nu-highlight
)
}
#calculate approximate words for a given number of tokens
#100 tokens correspond to roughly 60-80 words
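#
#Usage sketch (hypothetical token counts, using the defaults of 60-80 words per 100 tokens,
#and assuming `math prod-list` multiplies the two lists element-wise):
#  token2word 1500              # ≈ [900 1200] words, i.e. 1500/100 * 60 and 1500/100 * 80
#  token2word 1500 -m 50 -M 70  # custom lower/upper words-per-100-tokens bounds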
export def token2word [
tokens:int
--min(-m):int = 60
--max(-M):int = 80
--rate(-r):int = 100
] {
let token_units = $tokens / $rate
math prod-list [$token_units $token_units] [$min $max]
}
#upload a file to chatpdf server
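#
#Usage sketch (hypothetical file and label):
#  chatpdf add ./some_paper.pdf some_label
#  chatpdf add ./report.pdf -n      # notify android when the upload finishes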
export def "chatpdf add" [
file:string #filename with extension
label?:string #label for the pdf (default is downcase filename with underscores as spaces)
--notify(-n) #notify to android via join/tasker
] {
let file = get-input $in $file -n
if ($file | path parse | get extension | str downcase) != pdf {
return-error "wrong file type, it must be a pdf!"
}
let api_key = $env.MY_ENV_VARS.api_keys.chatpdf.api_key
let database_file = $env.MY_ENV_VARS.chatgpt_config | path join chatpdf_ids.json
let database = open $database_file
let url = "https://api.chatpdf.com/v1/sources/add-file"
let filename = ($file | path parse | get stem | str downcase | str replace -a " " "_")
let filepath = ($file | path expand)
if ($filename in ($database | columns)) {
return-error "there is already a file with the same name already uploaded!"
}
if (not ($label | is-empty)) and ($label in ($database | columns)) {
return-error "there is already a file with the same label already uploaded!"
}
let filename = get-input $filename $label
# let header = $"x-api-key: ($api_key)"
# let response = curl -s -X POST $url -H $header -F $"file=@($filepath)" | from json
let header = ["x-api-key" $api_key]
let response = http post -H $header -t multipart/form-data $url { file: (open -r $filepath) } --allow-errors
if ($response | is-empty) {
return-error "empty response!"
} else if ("sourceId" not-in ($response | columns) ) {
return-error $response.message
}
let id = ($response | get sourceId)
$database | upsert $filename $id | save -f $database_file
if $notify {"upload finished!" | tasker send-notification}
}
#delete a file from chatpdf server
export def "chatpdf del" [
] {
let api_key = $env.MY_ENV_VARS.api_keys.chatpdf.api_key
let database_file = $env.MY_ENV_VARS.chatgpt_config | path join chatpdf_ids.json
let database = open $database_file
let selection = $database | columns | sort | input list -f (echo-g "Select file to delete:")
let url = "https://api.chatpdf.com/v1/sources/delete"
let data = {"sources": [($database | get $selection)]}
let header = ["x-api-key" $api_key]
let response = http post $url -t application/json $data -H $header
$database | reject $selection | save -f $database_file
}
#chat with a pdf via chatpdf
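#
#Usage sketch (hypothetical label previously uploaded via chatpdf add):
#  chatpdf ask "what is the main conclusion?" -s some_label
#  chatpdf ask "summarize chapter 2"          # pick the pdf from a list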
export def "chatpdf ask" [
prompt?:string #question to the pdf
--select_pdf(-s):string #specify which book to ask via filename (without extension)
] {
let prompt = get-input $in $prompt
let api_key = $env.MY_ENV_VARS.api_keys.chatpdf.api_key
let database_file = $env.MY_ENV_VARS.chatgpt_config | path join chatpdf_ids.json
let database = open $database_file
let selection = (
if ($select_pdf | is-empty) {
$database
| columns
| sort
| input list -f (echo-g "Select pdf to ask a question:")
} else {
$select_pdf
| str downcase
| str replace -a " " "_"
}
)
if ($selection not-in ($database | columns)) {
return-error "pdf not found in server!"
}
let url = "https://api.chatpdf.com/v1/chats/message"
let header = ["x-api-key", ($api_key)]
let request = {
"referenceSources": true,
"sourceId": ($database | get $selection),
"messages": [
{
"role": "user",
"content": $prompt
}
]
}
let answer = http post -t application/json -H $header $url $request
return $answer.content
}
#fast call to chatpdf ask
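#
#Usage sketch:
#  askpdf "what does chapter 3 conclude?"   # pick the pdf from a list
#  askpdf -f                                # read the prompt from prompt.md and save the answer to answer.md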
export def askpdf [
prompt? #question to ask the pdf
--rubb(-r) #use rubb file, otherwise select from list
--btx(-b) #use btx file, otherwise select from list
--fast(-f) #get prompt from ~/Yandex.Disk/ChatGpt/prompt.md and save response to ~/Yandex.Disk/ChatGpt/answer.md
] {
let prompt = if $fast {
open ($env.MY_ENV_VARS.chatgpt | path join prompt.md)
} else {
get-input $in $prompt
}
let answer = (
match [$rubb,$btx] {
[true,true] => {return-error "only one of these flags allowed!"},
[true,false] => {chatpdf ask $prompt -s rubb},
[false,true] => {chatpdf ask ((open ([$env.MY_ENV_VARS.chatgpt_config prompt chatpdf_btx.md] | path join)) + "\n" + $prompt) -s btx},
[false,false] => {chatpdf ask $prompt}
}
)
if $fast {
$answer | save -f ($env.MY_ENV_VARS.chatgpt | path join answer.md)
} else {
return $answer
}
}
#list uploaded documents
export def "chatpdf list" [] {
open ($env.MY_ENV_VARS.chatgpt_config | path join chatpdf_ids.json) | columns
}
#single call chatgpt wrapper
#
#Available models at https://platform.openai.com/docs/models, but some of them are:
# - gpt-4o (128000 tokens)
# - gpt-4-turbo (128000 tokens)
# - gpt-4-vision (128000 tokens), points to gpt-4-turbo.
# - o1-preview (128000 tokens)
# - o1-mini (128000 tokens)
# - gpt-4o-mini (128000 tokens)
# - gpt-4-32k (32768 tokens)
# - gpt-3.5-turbo (16385 tokens)
# - text-davinci-003 (4097 tokens)
#
#system messages are available in:
# [$env.MY_ENV_VARS.chatgpt_config system] | path join
#
#pre_prompts are available in:
# [$env.MY_ENV_VARS.chatgpt_config prompt] | path join
#
#Note that:
# - --select_system > --list_system > --system
# - --select_preprompt > --pre_prompt
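#
#Usage sketch (hypothetical prompts and files):
#  chat_gpt "explain the big-O complexity of quicksort in one paragraph"
#  "what do you see in this image?" | chat_gpt -m gpt-4-vision -i photo.jpg
#  chat_gpt "what is new in the latest nushell release?" -w true -W 3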
export def chat_gpt [
query?: string # the query to Chat GPT
--model(-m):string = "gpt-4o-mini" # the model gpt-4o-mini, gpt-4o = gpt-4, etc
--system(-s):string = "You are a helpful assistant." # system message
--temp(-t): float = 0.9 # the temperature of the model
--image(-i):string # filepath of image file for gpt-4-vision
--list_system(-l) # select system message from list
--pre_prompt(-p) # select pre-prompt from list
--delim_with_backquotes(-d) # to delimit prompt (not pre-prompt) with triple single quotes (''')
--select_system: string # directly select system message
--select_preprompt: string # directly select pre_prompt
--web_search(-w) = false #include $web_results web search results in the prompt
--web_results(-W):int = 5 #number of web results to include
--document:string #use provided document to retrieve answer
] {
let query = get-input $in $query
if ($query | is-empty) {
return-error "Empty prompt!!!"
}
if ($model == "gpt-4-vision") and ($image | is-empty) {
return-error "gpt-4-vision needs and image file!"
}
if ($model == "gpt-4-vision") and (not ($image | path expand | path exists)) {
return-error "image file not found!"
}
let extension = (
if $model == "gpt-4-vision" {
$image | path parse | get extension
} else {
""
}
)
let image = (
if $model == "gpt-4-vision" {
open ($image | path expand) | encode base64
} else {
""
}
)
#select system message from database
let system_messages_files = ls ($env.MY_ENV_VARS.chatgpt_config | path join system) | sort-by name | get name
let system_messages = $system_messages_files | path parse | get stem
mut ssystem = ""
if $list_system {
let selection = ($system_messages | input list -f (echo-g "Select system message: "))
$ssystem = (open ($system_messages_files | find ("/" + $selection + ".md") | get 0 | ansi strip))
} else if (not ($select_system | is-empty)) {
try {
$ssystem = (open ($system_messages_files | find ("/" + $select_system + ".md") | get 0 | ansi strip))
}
}
let system = if ($ssystem | is-empty) {$system} else {$ssystem}
#select pre-prompt from database
let pre_prompt_files = ls ($env.MY_ENV_VARS.chatgpt_config | path join prompt) | sort-by name | get name
let pre_prompts = $pre_prompt_files | path parse | get stem
mut preprompt = ""
if $pre_prompt {
let selection = ($pre_prompts | input list -f (echo-g "Select pre-prompt: "))
$preprompt = (open ($pre_prompt_files | find ("/" + $selection + ".md") | get 0 | ansi strip))
} else if (not ($select_preprompt | is-empty)) {
try {
$preprompt = (open ($pre_prompt_files | find ("/" + $select_preprompt + ".md") | get 0 | ansi strip))
}
}
#build prompt
let prompt = (
if ($document | is-not-empty) {
$preprompt + "\n# DOCUMENT\n\n" + (open $document) + "\n\n# INPUT\n\n'''\n" + $query + "\n'''"
} else if ($preprompt | is-empty) and $delim_with_backquotes {
"'''" + "\n" + $query + "\n" + "'''"
} else if ($preprompt | is-empty) {
$query
} else if $delim_with_backquotes {
$preprompt + "\n" + "'''" + "\n" + $query + "\n" + "'''"
} else {
$preprompt + $query
}
)
#search prompts
let search_prompt = "From the next question delimited by triple single quotes ('''), please extract one sentence appropriate for a google search. Deliver your response in plain text without any formatting nor commentary on your part, and in the ORIGINAL language of the question. The question:\n'''" + $prompt + "\n'''"
let search = if $web_search {google_ai $search_prompt -t 0.2 | lines | first} else {""}
let web_content = if $web_search {google_search $search -n $web_results -v} else {""}
let web_content = if $web_search {ai google_search-summary $prompt $web_content -G -m} else {""}
let prompt = (
if $web_search {
$prompt + "\n\n You can complement your answer with the following up to date information about my question I obtained from a google search, in markdown format:\n" + $web_content
} else {
$prompt
}
)
# default models
let model = if $model == "gpt-4" {"gpt-4o"} else {$model}
let model = if $model == "gpt-4-vision" {"gpt-4-turbo"} else {$model}
# call to api
let header = [Authorization $"Bearer ($env.MY_ENV_VARS.api_keys.open_ai.api_key)"]
let site = "https://api.openai.com/v1/chat/completions"
let image_url = ("data:image/" + $extension + ";base64," + $image)
let request = (
if $model == "gpt-4-vision" {
{
model: $model,
messages: [
{
role: "system"
content: $system
},
{
role: "user"
content: [
{
"type": "text",
"text": $prompt
},
{
"type": "image_url",
"image_url":
{
"url": $image_url
}
}
]
}
],
temperature: $temp,
max_tokens: 16384
}
} else {
{
model: $model,
messages: [
{
role: "system"
content: $system
},
{
role: "user"
content: $prompt
}
],
temperature: $temp
}
}
)
let answer = http post -t application/json -H $header $site $request -e
return $answer.choices.0.message.content
}
#fast call to the chat_gpt and gemini wrappers
#
#Only one system message flag allowed.
#
#--gpt4 and --gemini are mutually exclusive flags.
#
#Uses chatgpt by default
#
#if --fast and --chat are used together, the first prompt is taken from the file
#
#For more personalization use `chat_gpt` or `gemini`
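#
#Usage sketch (hypothetical prompts and files):
#  askai "write a nushell one-liner that counts files per extension" -P   # programmer system message
#  askai -G -w "summarize today's tech news"                              # gemini with web search results
#  askai -c -G                                                            # interactive chat mode with gemini
#  askai "describe this picture" -v -i picture.jpg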
export def askai [
prompt?:string # string with the prompt, can be piped
system?:string # string with the system message. It has precedence over the s.m. flags
--programmer(-P) # use programmer s.m with temp 0.75, else use assistant with temp 0.9
--teacher(-T) # use school teacher s.m with temp 0.95, else use assistant with temp 0.9
--rubb(-R) # use rubb s.m. with temperature 0.65, else use assistant with temp 0.9
--biblical(-B) # use biblical assistant s.m with temp 0.78
--math_teacher(-M) # use undergraduate and postgraduate math teacher s.m. with temp 0.95
--google_assistant(-O) # use gOogle assistant (with web search) s.m with temp 0.7
--engineer(-E) # use prompt_engineer s.m. with temp 0.8 and its preprompt
--academic(-A) # use academic writer improver s.m with temp 0.78, and its preprompt
--fix_bug(-F) # use programmer s.m. with temp 0.75 and fix_code_bug preprompt
--summarizer(-S) #use simple summarizer s.m with temp 0.70 and its preprompt
--linux_expert(-L) #use linux expert s.m with temp 0.85
--list_system(-l) # select s.m from list (takes precedence over flags)
--list_preprompt(-p) # select pre-prompt from list (pre-prompt + ''' + prompt + ''')
--delimit_with_quotes(-q) = true #add ''' before and after prompt
--temperature(-t):float # takes precedence over the default temperatures set by the flags above
--gpt4(-g) # use gpt-4o instead of gpt-4o-mini (default)
--vision(-v) # use gpt-4-vision/gemini-pro-vision
--image(-i):string # filepath of the image to prompt to vision models
--fast(-f) # get prompt from prompt.md file and save response to answer.md
--gemini(-G) #use google gemini instead of chatgpt. gemini-1.5-flash for chat, gemini-1.5-pro otherwise
--bison(-b) #use google bison instead of chatgpt (needs --gemini)
--chat(-c) #use chat mode (text only). The only other valid flags are --gemini and --gpt4
--database(-D) #load chat conversation from database
--web_search(-w) #include web search results into the prompt
--web_results(-W):int = 5 #how many web results to include
--document(-d):string # answer question from provided document
--claude(-C) #use anthropic claude 3.5
] {
let prompt = if $fast {
open ($env.MY_ENV_VARS.chatgpt | path join prompt.md)
} else {
get-input $in $prompt
}
if ($prompt | is-empty) and not $chat {
return-error "no prompt provided!"
}
if $gpt4 and $gemini {
return-error "Please select only one ai system!"
}
if $bison and (not $gemini) {
return-error "--bison needs --gemini!"
}
if $vision and ($image | is-empty) {
return-error "vision models need and image file!"
}
let temp = if ($temperature | is-empty) {
if $programmer or $fix_bug {
0.75
} else if $teacher or $math_teacher {
0.95
} else if $engineer {
0.8
} else if $rubb {
0.65
} else if $academic or $biblical {
0.78
} else if $linux_expert {
0.85
} else if $summarizer or $google_assistant {
0.7
} else {
0.9
}
} else {
$temperature
}
let system = (
if ($system | is-empty) {
if $list_system {
""
} else if $programmer or $fix_bug {
"programmer"
} else if $teacher {
"school_teacher"
} else if $engineer {
"prompt_engineer"
} else if $rubb {
"rubb_2024"
} else if $academic {
"academic_writer_improver"
} else if $biblical {
"biblical_assistant"
} else if $summarizer {
"simple_summarizer"
} else if ($document | is-not-empty) {
"document_expert"
} else if $linux_expert {
"linux_expert"
} else if $math_teacher {
"math_teacher"
} else if $google_assistant {
"google_assistant"
} else {
"assistant"
}
} else {
$system
}
)
let pre_prompt = (
if $academic {
"improve_academic_writing"
} else if $summarizer {
"simple_summary"
} else if ($document | is-not-empty) {
"document_answer"
} else if $engineer {
"meta_prompt"
} else if $fix_bug {
"fix_code_bug"
} else {
"empty"
}
)
#chat mode
if $chat {
if $gemini {
google_ai $prompt -c -D $database -t $temp --select_system $system -p $list_preprompt -l $list_system -d false -w $web_search -W $web_results --select_preprompt $pre_prompt --document $document
} else {
# chat_gpt $prompt -c -D $database -t $temp --select_system $system -p $list_preprompt -l $list_system -d $delimit_with_quotes
print (echo-g "in progress for chatgpt and claude")
}
return
}
# question mode
#use google
if $gemini {
let answer = (
if $vision {
google_ai $prompt -t $temp -l $list_system -m gemini-pro-vision -p $list_preprompt -d true -i $image --select_preprompt $pre_prompt --select_system $system
} else {
match $bison {
true => {google_ai $prompt -t $temp -l $list_system -p $list_preprompt -m text-bison-001 -d true -w $web_search -W $web_results --select_preprompt $pre_prompt --select_system $system --document $document},
false => {google_ai $prompt -t $temp -l $list_system -p $list_preprompt -m gemini-1.5-pro -d true -w $web_search -W $web_results --select_preprompt $pre_prompt --select_system $system --document $document},
}
}
)
if $fast {
$answer | save -f ($env.MY_ENV_VARS.chatgpt | path join answer.md)
return
} else {
return $answer
}
}
#use claude
if $claude {
let answer = (
if $vision {
claude_ai $prompt -t $temp -l $list_system -p $list_preprompt -m claude-vision -d true -i $image --select_preprompt $pre_prompt --select_system $system -w $web_search -W $web_results
} else {
claude_ai $prompt -t $temp -l $list_system -p $list_preprompt -m claude-3.5 -d true --select_preprompt $pre_prompt --select_system $system --document $document -w $web_search -W $web_results
}
)
if $fast {
$answer | save -f ($env.MY_ENV_VARS.chatgpt | path join answer.md)
return
} else {
return $answer
}
}
#use chatgpt
let answer = (
if $vision {
match [$list_system,$list_preprompt] {
[true,true] => {chat_gpt $prompt -t $temp -l -m gpt-4-vision -p -d -i $image},
[true,false] => {chat_gpt $prompt -t $temp -l -m gpt-4-vision -i $image},
[false,true] => {chat_gpt $prompt -t $temp --select_system $system -m gpt-4-vision -p -d -i $image},
[false,false] => {chat_gpt $prompt -t $temp --select_system $system -m gpt-4-vision -i $image},
}
} else {
match [$gpt4,$list_system,$list_preprompt] {
[true,true,false] => {chat_gpt $prompt -t $temp -l -m gpt-4 --select_preprompt $pre_prompt -w $web_search -W $web_results},
[true,false,false] => {chat_gpt $prompt -t $temp --select_system $system -m gpt-4 --select_preprompt $pre_prompt -w $web_search -W $web_results},
[false,true,false] => {chat_gpt $prompt -t $temp -l --select_preprompt $pre_prompt -w $web_search -W $web_results},
[false,false,false] => {chat_gpt $prompt -t $temp --select_system $system --select_preprompt $pre_prompt -w $web_search -W $web_results},
[true,true,true] => {chat_gpt $prompt -t $temp -l -m gpt-4 -p -d -w $web_search -W $web_results},
[true,false,true] => {chat_gpt $prompt -t $temp --select_system $system -m gpt-4 -p -d -w $web_search -W $web_results},
[false,true,true] => {chat_gpt $prompt -t $temp -l -p -d -w $web_search -W $web_results},
[false,false,true] => {chat_gpt $prompt -t $temp --select_system $system -p -d -w $web_search -W $web_results}
}
}
)
if $fast {
$answer | save -f ($env.MY_ENV_VARS.chatgpt | path join answer.md)
return
} else {
return $answer
}
}
#alias for bard
export alias bard = askai -c -G -W 2
#generate a git commit message via chatgpt and push the changes
#
#Inspired by https://github.com/zurawiki/gptcommit
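#
#Usage sketch (run inside a git repository with pending changes):
#  ai git-push -G    # gemini-1.5-pro writes the commit message, then the changes are pushed
#  ai git-push       # same, using the default chatgpt model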
export def "ai git-push" [
--gpt4(-g) # use gpt-4o instead of gpt-4o-mini
--gemini(-G) #use google gemini-1.5-pro model
--claude(-C) #use anthropic claude-3-5-sonnet-latest
] {
if $gpt4 and $gemini {
return-error "select only one model!"
}
let max_words = if $gemini {700000} else if $claude {150000} else {100000}
let max_words_short = if $gemini {700000} else if $claude {150000} else {100000}
let model = if $gemini {"gemini"} else if $claude {"claude"} else {"chatgpt"}
print (echo-g $"asking ($model) to summarize the differences in the repository...")
let question = (git diff | str replace "\"" "'" -a)
let prompt = $question | str truncate -m $max_words
let prompt_short = $question | str truncate -m $max_words_short
let commit = (
try {
match [$gpt4,$gemini] {
[true,false] => {
try {
chat_gpt $question -t 0.5 --select_system get_diff_summarizer --select_preprompt summarize_git_diff -d -m gpt-4
} catch {
try {
chat_gpt $prompt -t 0.5 --select_system get_diff_summarizer --select_preprompt summarize_git_diff -d -m gpt-4
} catch {
chat_gpt $prompt_short -t 0.5 --select_system get_diff_summarizer --select_preprompt summarize_git_diff_short -d -m gpt-4
}
}
},
[false,false] => {
try {
chat_gpt $question -t 0.5 --select_system get_diff_summarizer --select_preprompt summarize_git_diff -d
} catch {
try {
chat_gpt $prompt -t 0.5 --select_system get_diff_summarizer --select_preprompt summarize_git_diff -d
} catch {
chat_gpt $prompt_short -t 0.5 --select_system get_diff_summarizer --select_preprompt summarize_git_diff_short -d
}
}
},
[false,true] => {
try {
google_ai $question -t 0.5 --select_system get_diff_summarizer --select_preprompt summarize_git_diff -d true -m gemini-1.5-pro
} catch {
try {
google_ai $prompt -t 0.5 --select_system get_diff_summarizer --select_preprompt summarize_git_diff -d true -m gemini-1.5-pro
} catch {
google_ai $prompt_short -t 0.5 --select_system get_diff_summarizer --select_preprompt summarize_git_diff_short -d true -m gemini-1.5-pro
}
}
}
}
} catch {
input (echo-g $"Something happened with ($model). Enter your commit message or leave empty to stop: ")
}
)
if ($commit | is-empty) {
return-error "Execution stopped by the user!"
}
#errors now give a record instead of empty string
if ($commit | typeof) != "string" {
input (echo-g $"Something happened with ($model). Enter your commit message or leave empty to stop: ")
}
print (echo-g "resulting commit message:")
print (echo $commit)
print (echo-g "pushing the changes with that commit message...\n")
let branch = (
git status
| lines
| first
| parse "On branch {branch}"
| str trim
| get branch
| get 0
)
git add -A
git status
git commit -am $commit
try {
git push origin $branch
} catch {
git push --set-upstream origin $branch
}
}
#audio to text transcription via whisper
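#
#Usage sketch (hypothetical files):
#  ai audio2text meeting.mp3 -l English -o srt
#  ai audio2text interview.mp3 -f true   # interactively mark a noise segment before transcribing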
export def "ai audio2text" [
filename #audio file input
--language(-l) = "Spanish" #language of audio file
--output_format(-o) = "txt" #output format: txt, vtt, srt, tsv, json, all
--translate(-t) #translate audio to english
--filter_noise(-f) = false #filter noise
--notify(-n) #notify to android via join/tasker
] {
let file = $filename | path parse | get stem
mut start = ""
mut end = ""
if $filter_noise {
print (echo-g $"reproduce ($filename) and select start and end time for noise segment, leave empty if no noise..." )
$start = (input "start? (hh:mm:ss): ")
$end = (input "end? (hh:mm:ss): ")
}
if ($start | is-empty) or ($end | is-empty) {
print (echo-g "generating temp file...")
if ($filename | path parse | get extension) =~ "mp3" {
cp $filename $"($file)-clean.mp3"
} else {
ffmpeg -loglevel 1 -i $"($filename)" -acodec libmp3lame -ab 128k -vn $"($file)-clean.mp3"
}
} else {
media remove-noise $filename $start $end 0.2 $"($file)-clean" -d false -E mp3
}
print (echo-g "transcribing to text...")
match $translate {
false => {whisper $"($file)-clean.mp3" --language $language --output_format $output_format --verbose False --fp16 False},
true => {whisper $"($file)-clean.mp3" --language $language --output_format $output_format --verbose False --fp16 False --task translate}
}
if $notify {"transcription finished!" | tasker send-notification}
}
#video to text transcription
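#
#Usage sketch (hypothetical file):
#  ai video2text lecture.mp4 -l English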
export def "ai video2text" [
file?:string #video file name with extension
--language(-l):string = "Spanish" #language of audio file
--filter_noise(-f) = false #filter audio noise
--notify(-n) #notify to android via join/tasker
] {
let file = get-input $in $file
media extract-audio $file
ai audio2text $"($file | path parse | get stem).mp3" -l $language -f $filter_noise
if $notify {"audio extracted!" | tasker send-notification}
}
#get a summary of a video, audio, subtitle file or youtube video url via ai
#
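#Usage sketch (hypothetical inputs):
#  ai media-summary meeting_recording.mp4 -t meeting -G      # summarize a local video with gemini
#  ai media-summary "https://www.youtube.com/watch?v=..." -t youtube
#  ai media-summary class.srt -t class -l English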
export def "ai media-summary" [
file:string # video, audio or subtitle file (vtt, srt, txt, url) file name with extension
--lang(-l):string = "Spanish" # language of the summary
--gpt4(-g) # to use gpt-4o instead of gpt-4o-mini
--gemini(-G) # use google gemini-1.5-pro instead of gpt
--claude(-C) # use anthropic claude
--notify(-n) # notify to android via join/tasker
--upload(-u) # upload extracted audio to gdrive
--type(-t): string = "meeting" # meeting, youtube, class or instructions
--filter_noise(-f) # filter audio noise
] {
let file = get-input $in $file -n
if ($file | is-empty) {return-error "no input provided!"}
mut title = ($file | path parse | get stem)
let extension = ($file | path parse | get extension)
let prompt = $"does the extension file format ($file) correspond to and audio, video or subtitle file; or an url?. IMPORTANT: include as subtitle type files with txt extension. Please only return your response in json format, with the unique key 'answer' and one of the key values: video, audio, subtitle, url or none. In plain text without any markdown formatting, ie, without ```"
let media_type = google_ai $prompt | ai fix-json | get answer
match $media_type {
"video" => {ai video2text $file -l $lang -f $filter_noise},
"audio" => {ai audio2text $file -l $lang -f $filter_noise},
"subtitle" => {
match $extension {
"vtt" => {ffmpeg -i $file -f srt $"($title)-clean.txt"},
"srt" => {mv -f $file $"($title)-clean.txt"},
"txt" => {mv -f $file $"($title)-clean.txt"},
_ => {return-error "input not supported!"}
}
},
"url" => {
let subtitle_file = ai yt-get-transcription $file
$title = ($subtitle_file | path parse | get stem)
mv -f $subtitle_file $"($title)-clean.txt"
}
_ => {return-error $"wrong media type: ($file)"}
}
let system_prompt = match $type {
"meeting" => {"meeting_summarizer"},
"youtube" => {"ytvideo_summarizer"},
"class" => {"class_transcriptor"},
"instructions" => {"instructions_extractor"},
_ => {return-error "not a valid type!"}
}
let pre_prompt = match $type {
"meeting" => {"consolidate_transcription"},
"youtube" => {"consolidate_ytvideo"},
"class" => {"consolidate_class"},
"instructions" => {"extract_instructions"} #crear consolidate_instructions
}
if $upload and $media_type in ["video" "audio" "url"] {
print (echo-g $"uploading audio file...")
cp $"($title)-clean.mp3" $env.MY_ENV_VARS.gdriveTranscriptionSummaryDirectory
}
print (echo-g $"transcription file saved as ($title)-clean.txt")
let the_subtitle = $"($title)-clean.txt"
#removing existing temp files
ls | where name =~ "split|summaries" | rm-pipe
#definitions
let output = $"($title)_summary.md"
# dealing with the case when the transcription file has too many words for the selected model
let max_words = if $gemini {700000} else if $claude {150000} else {100000}
let n_words = wc -w $the_subtitle | awk '{print $1}' | into int
if $n_words > $max_words {
print (echo-g $"splitting transcription of ($title)...")
let filenames = $"($title)_split_"
let split_command = ("awk '{total+=NF; print > " + $"\"($filenames)\"" + "sprintf(\"%03d\",int(total/" + $"($max_words)" + "))" + "\".txt\"}'" + $" \"($the_subtitle)\"")
bash -c $split_command
let files = (ls | find split | where name !~ summary | ansi-strip-table)
$files | each {|split_file|
let t_input = (open ($split_file | get name))
let t_output = ($split_file | get name | path parse | get stem)
ai transcription-summary $t_input $t_output -g $gpt4 -t $type -G $gemini -C $claude
}
let temp_output = $"($title)_summaries.md"
print (echo-g $"combining the results into ($temp_output)...")
touch $temp_output
let files = (ls | find split | find summary | enumerate)
$files | each {|split_file|
echo $"\n\nResumen de la parte ($split_file.index):\n\n" | save --append $temp_output
open ($split_file.item.name | ansi strip) | save --append $temp_output
echo "\n" | save --append $temp_output
}
let prompt = (open $temp_output)
let model = if $gemini {"gemini"} else if $claude {"claude"} else {"chatgpt"}
print (echo-g $"asking ($model) to combine the results in ($temp_output)...")
if $gpt4 {
chat_gpt $prompt -t 0.5 --select_system $system_prompt --select_preprompt $pre_prompt -d -m gpt-4
} else if $gemini {
google_ai $prompt -t 0.5 --select_system $system_prompt --select_preprompt $pre_prompt -d true -m gemini-1.5-pro
} else if $claude {
claude_ai $prompt -t 0.5 --select_system $system_prompt --select_preprompt $pre_prompt -d true -m claude-3.5
} else {
chat_gpt $prompt -t 0.5 --select_system $system_prompt --select_preprompt $pre_prompt -d
}
| save -f $output
if $notify {"summary finished!" | tasker send-notification}
if $upload {cp $output $env.MY_ENV_VARS.gdriveTranscriptionSummaryDirectory}
return
}
ai transcription-summary (open $the_subtitle) $output -g $gpt4 -t $type -G $gemini -C $claude
if $upload {cp $output $env.MY_ENV_VARS.gdriveTranscriptionSummaryDirectory}
if $notify {"summary finished!" | tasker send-notification}
}
#resume video transcription text via gpt
export def "ai transcription-summary" [
prompt #transcription text
output #output name without extension
--gpt4(-g) = false #whether to use gpt-4o
--gemini(-G) = false #use google gemini-1.5-pro
--claude(-C) = false #use anthropic claude
--type(-t): string = "meeting" # meeting, youtube, class or instructions
--notify(-n) #notify to android via join/tasker
] {
let output_file = $"($output | path parse | get stem).md"
let model = if $gemini {"gemini"} else if $claude {"claude"} else {"chatgpt"}
let system_prompt = match $type {
"meeting" => {"meeting_summarizer"},
"youtube" => {"ytvideo_summarizer"},
"class" => {"class_transcriptor"},
"instructions" => {"instructions_extractor"},
_ => {return-error "not a valid type!"}
}
let pre_prompt = match $type {
"meeting" => {"summarize_transcription"},
"youtube" => {"summarize_ytvideo"},
"class" => {"process_class"},
"instructions" => {"extract_instructions"}
}
print (echo-g $"asking ($model) for a summary of the file transcription...")
if $gpt4 {
chat_gpt $prompt -t 0.5 --select_system $system_prompt --select_preprompt $pre_prompt -d -m gpt-4
} else if $gemini {
google_ai $prompt -t 0.5 --select_system $system_prompt --select_preprompt $pre_prompt -d true -m gemini-1.5-pro
} else if $claude {
claude_ai $prompt -t 0.5 --select_system $system_prompt --select_preprompt $pre_prompt -d true -m claude-3.5
} else {
chat_gpt $prompt -t 0.5 --select_system $system_prompt --select_preprompt $pre_prompt -d
}
| save -f $output_file
if $notify {"summary finished!" | tasker send-notification}
}
#get transcription of youtube video url
#
#First it tries to download the transcription. If it doesn't succeed, it downloads the audio and transcribes it using whisper.
#
#Two-character language codes, for example:
#es: spanish
#fr: french
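#
#Usage sketch (hypothetical url):
#  let subs = (ai yt-get-transcription "https://www.youtube.com/watch?v=..." --lang es)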
export def "ai yt-get-transcription" [
url?:string # video url
--lang = "en" # language of the summary (default english)
] {
#deleting previous temp file
if ((ls | find yt_temp | length) > 0) {rm yt_temp* | ignore}
#getting the subtitle
yt-dlp -N 10 --write-info-json $url --output yt_temp --skip-download
let video_info = (open yt_temp.info.json)
let title = ($video_info | get title)
let subtitles_info = ($video_info | get subtitles?)
let languages = ($subtitles_info | columns)
let the_language = ($languages | find $lang)
let the_subtitle = $"($title).txt"
if ($the_language | is-empty) {
#first try auto-subs then whisper
yt-dlp -N 10 --write-auto-subs $url --output yt_temp --skip-download
if ((ls | find yt_temp | find vtt | length) > 0) {
ffmpeg -i (ls yt_temp*.vtt | get 0 | get name) -f srt $the_subtitle
} else {
print (echo-g "downloading audio...")
yt-dlp --extract-audio --audio-format mp3 --audio-quality 0 $url -o $"($title).mp3"
print (echo-g "transcribing audio...")
whisper $"($title).mp3" --output_format srt --verbose False --fp16 False
mv -f $"($title).mp3" $the_subtitle
}
} else {
let sub_url = (
$subtitles_info
| get ($the_language | get 0)
| where ext =~ "vtt"
| get url
| get 0
)
http get $sub_url | save -f $"($title).vtt"
ffmpeg -i $"($title).vtt" -f srt $the_subtitle
}
print (echo-g $"transcription file saved as ($the_subtitle)")
ls | find yt_temp | rm-pipe
return $the_subtitle
}
#generate subtitles of video file via whisper and mymemory/openai api
#
#`? trans` and `whisper --help` for more info on languages
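#
#Usage sketch (hypothetical file):
#  ai generate-subtitles video.mp4 -l "fr-FR/French" -t true   # also translate the subtitles to spanish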
export def "ai generate-subtitles" [
file #input video file
--language(-l) = "en-US/English" #language of input video file, mymmemory/whisper
--translate(-t) = false #to translate to spanish
--notify(-n) #notify to android via join/tasker
] {
let filename = $file | path parse | get stem
media extract-audio $file
ai audio2text $"($filename).mp3" -o srt -l ($language | split row "/" | get 1)
if $notify {"subtitle generated!" | tasker send-notification}