#!/usr/bin/env python
## Life AI Player ZMQ client
#
# Chris Kennedy 2023 (C) GPL
#
# Free to use for any use as in truly free software
# as Richard Stallman intended it to be.
#
import io
import zmq
import argparse
import soundfile as sf
import re
import os
import cv2
import numpy as np
import logging
import time
import pygame
import queue
import threading
from queue import PriorityQueue
from PIL import Image, ImageDraw, ImageFont
import textwrap
import json
from collections import deque
from pydub import AudioSegment
import magic
from dotenv import load_dotenv
import NDIlib as ndi
import webuiapi
from diffusers import StableDiffusionPipeline
import torch
from transformers import logging as trlogging
import random
load_dotenv()
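
# Generate a single 512x512 image from a text prompt via the Stable Diffusion
# WebUI API. Note: sdui_api is assumed to be a webuiapi.WebUIApi client created
# in the startup code beyond this excerpt, e.g. (a sketch, host/port are
# assumptions):
#   sdui_api = webuiapi.WebUIApi(host="127.0.0.1", port=7860)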
def generate_sd_webui(prompt):
    try:
        result = sdui_api.txt2img(prompt=prompt,
                                  negative_prompt=args.negative_prompt,
                                  save_images=False,
                                  width=512,
                                  height=512,
                                  )
        sdui_api.util_wait_for_ready()
        if result.image is None:
            logger.error(f"Error generating image: {result.error}")
            return None
        return result.image
    except Exception as e:
        logger.error(f"Error generating image: {e}")
        return None
# Queue to store the last images
past_images_queue = deque(maxlen=6)  # 6 side images total, 3 per side
#past_images_all_time_queue = deque(maxlen=1000) # Assuming 100 images for each side
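
# Compose a 16:9 frame: the newest image is scaled square in the center and the
# six most recent past images are stacked three per side as filler columns.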
def create_16_9_image(center_image, side_images, target_width, target_height):
    # Scale the main image to fit the height of the 16:9 image
    main_image_scaled = center_image.resize((target_height, target_height), Image.LANCZOS)

    # Create a new image with the target 16:9 dimensions
    final_image = Image.new('RGB', (target_width, target_height))

    # Calculate the width of the area on each side of the main image
    side_area_width = (target_width - target_height) // 2

    # Calculate the size for the side images to fill the space as much as possible
    # Given we want to fit 3 images per side, we divide the height by 3
    side_image_size = target_height // 3

    # Split the side images for left and right
    left_side_images = side_images[:3]
    right_side_images = side_images[3:6]

    # Resize side images to fill the space
    left_resized_side_images = [img.resize((side_image_size, side_image_size), Image.LANCZOS) for img in left_side_images]
    right_resized_side_images = [img.resize((side_image_size, side_image_size), Image.LANCZOS) for img in right_side_images]

    # Paste the side images to fill the left and right areas
    for i in range(3):
        # Left side images
        final_image.paste(left_resized_side_images[i], (0, i * side_image_size))
        # Right side images
        final_image.paste(right_resized_side_images[i], (target_width - side_image_size, i * side_image_size))

    # Paste the scaled main image in the center
    final_image.paste(main_image_scaled, (side_area_width, 0))

    return final_image
def create_filmstrip_images(center_image, side_images):
    # Assuming side_images is a list of 6 images, 3 for left and 3 for right
    left_images = side_images[:3]
    right_images = side_images[3:]

    # Combine the left images, the center image, and the right images horizontally
    images_to_combine = left_images + [center_image] + right_images
    combined_width = sum(img.size[0] for img in images_to_combine)
    combined_height = max(img.size[1] for img in images_to_combine)

    # Create a new image with the combined dimensions
    wide_image = Image.new('RGB', (combined_width, combined_height))
    print(f"Combined Width {combined_width} x Combined Height {combined_height}")

    # Paste the images into the wide_image
    x_offset = 0
    for img in images_to_combine:
        wide_image.paste(img, (x_offset, 0))
        x_offset += img.size[0]

    return wide_image
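
# Build the final display frame for a new image: use the 16:9 side-filler
# layout once enough past images have accumulated, otherwise just burn the
# caption into the image as-is. Unique (non-throttled) images are pushed into
# the history queue for future side fills.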
# Main function to process the new image
def process_new_image(new_image, text, args, unique_image=False, banner=""):
    target_width = args.width  # This should be set to the desired width for 16:9 aspect ratio
    target_height = args.height  # This should be set to the height corresponding to the 16:9 aspect ratio

    # Check if we have enough images to fill the sides
    if len(past_images_queue) >= 6:
        # Use the 6 most recent images for each side
        side_images = list(past_images_queue)
        final_image = create_16_9_image(new_image, side_images, target_width, target_height)
        final_image = add_text_to_image(final_image, text, banner)
    else:
        # Not enough images, just add text to the new_image
        final_image = add_text_to_image(new_image, text, banner)

    # Add the new image to the queue for future use
    if unique_image:
        past_images_queue.appendleft(new_image)
        #past_images_all_time_queue.appendleft(new_image)

    return final_image
## Japanese writing on images
def draw_japanese_text_on_image(image_np, text, position, font_path, font_size):
    # Convert to a PIL Image
    image_pil = Image.fromarray(cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB))

    # Prepare drawing context
    draw = ImageDraw.Draw(image_pil)
    font = ImageFont.truetype(font_path, font_size)

    # Define the border width for the text
    border_width = 5

    # Get text size using getbbox, which returns (left, top, right, bottom)
    x, y = position
    bbox = font.getbbox(text)
    text_width, text_height = bbox[2] - bbox[0], bbox[3] - bbox[1]
    y = y - text_height
    x = x + text_width / 2

    # Draw text border (outline)
    for i in range(-border_width, border_width + 1):
        for j in range(-border_width, border_width + 1):
            draw.text((x + i, y + j), text, font=font, fill=(0, 0, 0))  # Black border

    # Draw text on image
    draw.text((x, y), text, font=font, fill=(255, 255, 255))  # White fill

    # Convert back to NumPy array
    image_np = cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2BGR)
    return image_np
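
# Letterbox/pad the frame to the target resolution, overlay an optional banner
# strip across the top, then word-wrap the caption and draw it bottom-up with a
# drop shadow and outline for readability.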
def add_text_to_image(image, text, banner=""):
    if image is not None:
        logger.info(f"Adding text to image: {text[:80]}")

        # Maintain aspect ratio and add black bars
        width, height = image.size
        # Target canvas ratio vs. the image's current ratio
        desired_ratio = args.width / args.height
        current_ratio = width / height

        # Convert the PIL Image to a NumPy array for OpenCV operations
        image = np.array(image)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # Convert from RGB to BGR for OpenCV

        if current_ratio > desired_ratio:
            new_width = int(height * desired_ratio)
            padding = max(0, (new_width - width) // 2)
            image = cv2.copyMakeBorder(image, 0, 0, padding, padding, cv2.BORDER_CONSTANT, value=[0, 0, 0])
        else:
            new_height = int(width / desired_ratio)
            padding = max(0, (new_height - height) // 2)
            image = cv2.copyMakeBorder(image, padding, padding, 0, 0, cv2.BORDER_CONSTANT, value=[0, 0, 0])

        # Resize to the desired resolution
        # Calculate the scaling factors
        x_scale = args.width / image.shape[1]
        y_scale = args.height / image.shape[0]
        scale_factor = min(x_scale, y_scale)

        # Compute new width and height while maintaining the aspect ratio
        new_width = int(image.shape[1] * scale_factor)
        new_height = int(image.shape[0] * scale_factor)

        # Resize the image
        image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LINEAR)

        # If the resized image doesn't match the desired resolution, pad with black
        if new_width != args.width or new_height != args.height:
            top_padding = (args.height - new_height) // 2
            bottom_padding = args.height - new_height - top_padding
            left_padding = (args.width - new_width) // 2
            right_padding = args.width - new_width - left_padding
            image = cv2.copyMakeBorder(image, top_padding, bottom_padding, left_padding, right_padding, cv2.BORDER_CONSTANT, value=[0, 0, 0])

        width, height = image.shape[1], image.shape[0]
        current_ratio = width / height

        def contains_japanese(text):
            for char in text:
                if any([start <= ord(char) <= end for start, end in [
                        (0x3040, 0x309F),  # Hiragana
                        (0x30A0, 0x30FF),  # Katakana
                        (0x4E00, 0x9FFF),  # Kanji
                        (0x3400, 0x4DBF)   # Kanji (extension A)
                        ]]):
                    return True
            return False

        wrap_width = 30
        if current_ratio > 1.0:
            wrap_width = 50
        wrapped_text = textwrap.wrap(text, width=wrap_width, fix_sentence_endings=False, break_long_words=False, break_on_hyphens=False)
        y_pos = height  # Start from the bottom and work upwards
        font_size = 2
        font_thickness = 6
        border_thickness = 15

        # Configuration for the banner text
        banner_font_size = 1  # Smaller font size for the banner
        banner_font_thickness = 2  # Thickness of the banner font
        banner_outline_color = (0, 0, 0)  # Black outline for better visibility
        banner_border_thickness = 1  # Thickness of the text outline

        # Draw banner if it's not empty on the top of the image from left to right
        if banner != "":
            ((text_width, text_height), baseline) = cv2.getTextSize(
                banner[:width-10], cv2.FONT_HERSHEY_DUPLEX, banner_font_size, banner_font_thickness)
            x_pos_t = 5  # Adjusted width from left
            y_pos_t = text_height + 10  # Adjusted height from top, with some padding

            # Draw a solid black rectangle for the banner background
            overlay = image.copy()
            cv2.rectangle(overlay, (0, 0), (width, y_pos_t + 10), (0, 0, 0), -1)

            # Draw text shadow for the banner
            shadow_offset = 2  # Smaller offset for the shadow
            cv2.putText(image, banner[:width-10], (x_pos_t + shadow_offset, y_pos_t - shadow_offset),
                        cv2.FONT_HERSHEY_DUPLEX, banner_font_size, (0, 0, 0), banner_font_thickness)

            alpha = 0.5  # Transparency factor.
            image = cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0)

            # Draw text outline for the banner
            cv2.putText(image, banner[:width-10], (x_pos_t, y_pos_t), cv2.FONT_HERSHEY_DUPLEX, banner_font_size, banner_outline_color, banner_border_thickness)

            # Draw the main banner text
            cv2.putText(image, banner[:width-10], (x_pos_t, y_pos_t), cv2.FONT_HERSHEY_DUPLEX, banner_font_size, (255, 255, 0), banner_font_thickness)

        for line in reversed(wrapped_text):
            # Get the text size, baseline, and adjust the y_pos
            ((text_width, text_height), baseline) = cv2.getTextSize(line[:width], cv2.FONT_HERSHEY_DUPLEX, font_size, font_thickness)
            x_pos = (width - text_width) // 2  # Center the text
            y_pos -= (baseline + text_height + 10)  # Adjust y_pos for each line, reduce padding

            # Calculate the rectangle coordinates with less height
            rect_x_left = x_pos - 10
            rect_y_top = y_pos - text_height - 10  # Reduced padding for height
            rect_x_right = x_pos + text_width + 10
            rect_y_bottom = y_pos + 18  # Reduced padding for height

            # Draw a semi-transparent rectangle
            overlay = image.copy()
            cv2.rectangle(overlay, (rect_x_left, rect_y_top), (rect_x_right, rect_y_bottom), (0, 0, 0), -1)
            alpha = 0.0  # Transparency factor.
            image = cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0)

            # Draw text shadow for a drop shadow effect
            shadow_offset = 4  # Offset for the shadow, adjust as needed
            cv2.putText(image, line[:width], (x_pos + shadow_offset, y_pos + shadow_offset), cv2.FONT_HERSHEY_DUPLEX, font_size, (0, 0, 0), font_thickness)

            # Draw text outline
            cv2.putText(image, line[:width], (x_pos, y_pos), cv2.FONT_HERSHEY_DUPLEX, font_size, (0, 0, 0), border_thickness)

            # Draw the main text
            cv2.putText(image, line[:width], (x_pos, y_pos), cv2.FONT_HERSHEY_DUPLEX, font_size, (255, 255, 255), font_thickness)

        # Convert back from numpy array
        image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

    return image  # returning the modified image
def image_to_ascii(image):
    image = image.resize((args.width, int((image.height / image.width) * args.width * 0.55)), Image.LANCZOS)
    image = image.convert('L')  # Convert to grayscale

    pixels = list(image.getdata())
    ascii_chars = ["@", "#", "S", "%", "?", "*", "+", ";", ":", ",", "."]
    ascii_image = [ascii_chars[pixel // 25] for pixel in pixels]
    ascii_image = ''.join([''.join(ascii_image[i:i + args.width]) + '\n' for i in range(0, len(ascii_image), args.width)])

    return ascii_image
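
# Pump the OpenCV window for duration_ms and handle hotkeys:
# 'f' toggles fullscreen, 'm' restores the normal window, 'q'/ESC closes it.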
def update_image(duration_ms):
    if not cv_display:
        time.sleep(duration_ms / 1000)
        return

    k = cv2.waitKey(duration_ms) & 0xFF
    if k == ord('f'):
        print(f"Render: Toggling fullscreen.")
        cv2.moveWindow(f"{args.title} (local)", 0, 0)
        # maximize_window()
        cv2.setWindowProperty(f"{args.title} (local)", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    elif k == ord('m'):
        print(f"Render: Toggling maximized.")
        cv2.setWindowProperty(f"{args.title} (local)", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
    elif k == ord('q') or k == 27:
        print(f"Render: Quitting.")
        cv2.destroyAllWindows()
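
# Convert an RGB(A) frame to planar I420 (full-size Y plane followed by the
# 2x2-subsampled U and V planes) as required by the NDI video sender.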
def convert_rgbx_to_yuv420(rgbx_img):
    # Remove the alpha channel
    rgb_img = rgbx_img[:, :, :3]

    # Convert RGB to YUV
    yuv_img = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2YUV)

    # Subsample the U and V channels
    Y, U, V = cv2.split(yuv_img)
    U = cv2.resize(U, (U.shape[1] // 2, U.shape[0] // 2), interpolation=cv2.INTER_AREA)
    V = cv2.resize(V, (V.shape[1] // 2, V.shape[0] // 2), interpolation=cv2.INTER_AREA)

    # Correctly layout YUV420
    yuv420_img = np.concatenate([Y.flatten(), U.flatten(), V.flatten()])
    return yuv420_img
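
# Display a frame on the local OpenCV window and/or push it to the NDI sender.
# Note: ndi_send and video_frame are assumed to be created in the startup code
# beyond this excerpt.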
def render(image, duration):
    image = np.copy(image)

    if cv_display:
        # Convert RGB to BGR (OpenCV uses BGR format)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imshow(f"{args.title} (local)", image)
        duration_ms = 1
        update_image(duration_ms)

    # NDI Video from Images
    if ndi_display:
        video_frame.FourCC = ndi.FOURCC_VIDEO_TYPE_I420
        yuv420_image = convert_rgbx_to_yuv420(image)
        video_frame.data = yuv420_image
        original_height, original_width = image.shape[:2]
        video_frame.xres = original_width
        video_frame.yres = original_height
        video_frame.line_stride_in_bytes = original_width  # Stride for Y plane
        video_frame.picture_aspect_ratio = float(original_width) / float(original_height)
        ndi.send_send_video_v2(ndi_send, video_frame)
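
# Persist received headers and assets under assets/<mediaid>/<type>/ so a run
# can be replayed or inspected later.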
def save_json(header, mediaid, type, segment_number):
    assets_dir = f"assets/{mediaid}/{type}"
    os.makedirs(assets_dir, exist_ok=True)
    with open(f"{assets_dir}/{mediaid}_{type}_{segment_number}.json", 'w') as json_file:
        json.dump(header, json_file)
def save_asset(asset, mediaid, segment_number, type):
    directory = f"assets/{mediaid}/{type}"
    os.makedirs(directory, exist_ok=True)
    file_path = f"{directory}/{mediaid}_{type}_{segment_number}"

    if type == "speek":
        file_path += ".wav"
        with open(file_path, 'wb') as file:
            file.write(asset)
    elif type == "image":
        file_path += ".png"
        img_byte_arr = io.BytesIO()
        asset.save(img_byte_arr, format="PNG")  # Save it as PNG or JPEG depending on your preference
        asset = img_byte_arr.getvalue()
        with open(file_path, 'wb') as f:
            f.write(asset)
    elif type == "music":
        file_path += ".wav"
        with open(file_path, 'wb') as file:
            file.write(asset)
def get_audio_duration(audio_samples):
    audio_segment = AudioSegment.from_file(io.BytesIO(audio_samples), format="wav")
    duration_ms = len(audio_segment)  # Duration in milliseconds
    duration_s = duration_ms / 1000.0  # Convert to seconds
    return duration_s
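
# Background-music worker: loops on a dedicated pygame mixer channel and swaps
# tracks (with a short fade) when change_track() hands it a new WAV buffer.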
class BackgroundMusic(threading.Thread):
    def __init__(self):
        super().__init__()
        self.audio_buffer = None
        self.running = True
        self.lock = threading.Lock()  # Lock to synchronize access to audio_buffer
        self.channel = audio_channel_music  # Assign a specific channel
        self.complete = True
        self.switching = False

    def run(self):
        while self.running:
            if self.audio_buffer:
                self.play_audio()
            else:
                time.sleep(0.1)

    def play_audio(self):
        audiobuf = None
        with self.lock:
            if self.audio_buffer is not None:
                audiobuf = io.BytesIO(self.audio_buffer)
        if audiobuf:
            # Load the audio data into a Sound object
            sound = pygame.mixer.Sound(audiobuf)
            self.switching = False
            self.channel.play(sound, loops=0, maxtime=0, fade_ms=10)  # Play the Sound object on this channel
            while self.channel.get_busy():  # Wait for playback to finish
                time.sleep(0.1)
        else:
            print(f"Music Thread: *** No audio buffer to play for play_audio().")
            time.sleep(1)

    def change_track(self, audio_buffer):
        with self.lock:
            # Update the audio buffer with the new track
            self.audio_buffer = audio_buffer
            self.switching = True
            self.channel.fadeout(10)  # Fade out the audio

    def pause(self):
        with self.lock:
            self.channel.pause()

    def unpause(self):
        with self.lock:
            self.channel.unpause()

    def stop(self):
        self.running = False
def prepare_audio_frame(audio_segment, sample_rate=48000, no_channels=2):
    # Convert audio_segment to samples
    samples = np.array(audio_segment.get_array_of_samples(), dtype=np.float32)
    audio_frame = ndi.AudioFrameV2()

    # Set the audio frame properties
    audio_frame.sample_rate = sample_rate
    audio_frame.no_channels = no_channels
    audio_frame.no_samples = len(samples) // no_channels
    audio_frame.timecode = ndi.send_timecode_synthesize

    # NDI SDK expects interleaved float32 audio data
    audio_frame.p_data = samples

    return audio_frame
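
# Speech playback: route the buffer to NDI when --ndi_audio is set, otherwise
# decode (WAV or AAC), resample to the target rate, and play it on the
# dedicated pygame speech channel.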
def play_audio(audio_data, target_sample_rate=22050, no_channels=2, duration=1):
    # Detect the mime type of the audio data
    mime_type = magic.from_buffer(audio_data, mime=True)

    # NDI Audio
    if args.ndi_audio:
        target_sample_rate = 48000
        audio_segment = AudioSegment.from_file(io.BytesIO(audio_data), format="wav")
        audio_frame = prepare_audio_frame(audio_segment, sample_rate=target_sample_rate, no_channels=no_channels)
        ndi.send_send_audio_v2(ndi_send, audio_frame)
        time.sleep(duration)
        return

    # Load the audio data into an AudioSegment
    if mime_type in ['audio/x-wav', 'audio/wav']:
        audio_segment = AudioSegment.from_file(io.BytesIO(audio_data), format='wav')
    elif mime_type in ['audio/aac', 'audio/x-aac', 'audio/x-hx-aac-adts']:
        audio_segment = AudioSegment.from_file(io.BytesIO(audio_data), format='aac')
        # Increase the volume by 10 dB
        audio_segment += 10
    else:
        raise ValueError(f"Unsupported audio format: {mime_type}")

    # Resample the audio to the target sample rate if necessary
    if audio_segment.frame_rate != target_sample_rate:
        audio_segment = audio_segment.set_frame_rate(target_sample_rate)

    # Export the resampled audio to a bytes buffer
    wav_io = io.BytesIO()
    audio_segment.export(wav_io, format='wav')
    wav_io.seek(0)
    audio_data = wav_io.read()

    # Load the WAV data into Pygame
    sound = pygame.mixer.Sound(io.BytesIO(audio_data))

    # Play the audio on the selected channel
    print(f"*** Playing audio on channel {audio_channel_speech.get_busy()}")
    audio_channel_speech.play(sound)
def playback(image, audio, duration):
    # play both audio and display image with audio blocking till finished
    if image and not args.norender:
        render(image, duration)
    if audio:
        play_audio(audio, 22050, 2, duration)
        print(f"Audio playback initiated.")
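
# Main event loop: drain the ZMQ SUB socket into per-type queues, pair audio
# segments with images for synchronized playback, publish buffer/latency status
# on the PUB socket, and fall back to slideshow frames when the buffers run
# dry. Note: audio_buffer, image_buffer, music_buffer, and text_history are
# assumed to be created in the startup code beyond this excerpt.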
def main():
    ## Main routine
    bg_music = BackgroundMusic()
    bg_music.start()

    last_music_change = 0
    last_image_asset = None
    last_text_asset = "Ask me a Question - Groovy Life AI: tibetan mountains"
    image_segment_number = 0
    audio_segment_number = 0
    last_sent_segments = time.time()
    audio_playback_complete_speech = True
    audio_latency_delta = 0
    image_latency_delta = 0
    stats_last_sent_ts = 0
    stats_last_sent_duration = 0.0
    end_of_stream = True
    last_sent_break = 0
    last_generated_image_time = 0
    while True:
        # check if we will block, if so then don't and check events instead of pygame
        header_message = None
        segment_number = 0
        timestamp = 0
        mediaid = 0
        message = ""
        text = ""
        optimized_prompt = ""
        type = ""
        music = None
        audio = None
        image = None
        if socket.poll(timeout=0):
            # Receive the header message
            header_message = socket.recv_json()
            segment_number = header_message["segment_number"]
            timestamp = header_message["timestamp"]
            mediaid = header_message["mediaid"]
            message = header_message["message"]
            text = header_message["text"]
            optimized_prompt = text
            if 'optimized_text' in header_message:
                optimized_prompt = header_message["optimized_text"]
            type = header_message["stream"]

            if type == "music":
                # Now, receive the binary audio data
                music = socket.recv()

                # Print the header
                logger.info(f"Received {type} segment #{segment_number} {timestamp}: {mediaid} {len(text)} characters: {text[:20]}")

                try:
                    if args.save:
                        # Save audio asset
                        save_json(header_message, mediaid, type, segment_number)
                        save_asset(music, mediaid, segment_number, type)
                except Exception as e:
                    logger.error(f"Error saving music asset: {e}")

                # queue in music_buffer header and music
                music_buffer.put((header_message, music))
                print(f"M", end="", flush=True)
if type == "speek":
# Now, receive the binary audio data
audio = socket.recv()
# Print the header
logger.info(f"Received {type} segment #{segment_number} {timestamp}: {mediaid} {len(text)} characters: {text[:20]}")
# queue the header and audio together
audio_buffer.put((header_message, audio))
try:
if args.save == True:
save_json(header_message, mediaid, type, segment_number)
save_asset(audio, mediaid, segment_number, type)
except Exception as e:
logger.error(f"Error saving audio asset: {e}")
print(f"S", end="", flush=True)
            ## Image
            if type == "image":
                # Now, receive the binary image data
                image = socket.recv()

                # Print the header
                logger.info(f"Received image segment {type} #{segment_number} {timestamp}: {mediaid} {len(text)} characters: {text[:20]}")

                try:
                    # Convert the bytes back to a PIL Image object
                    image = Image.open(io.BytesIO(image))
                    if args.show_ascii_art:
                        payload_hex = image_to_ascii(image)
                        print(f"\n{payload_hex}\n", flush=True)

                    logger.info(f"Image Prompt: {optimized_prompt[:20]}\nOriginal Text: {text[:10]}...\nOriginal Question: {message[:10]}...")

                    # queue the header and image together
                    image_buffer.put((header_message, image))
                except Exception as e:
                    logger.error(f"Error converting image: {e}")

                try:
                    if args.save:
                        save_json(header_message, mediaid, type, segment_number)
                        save_asset(image, mediaid, segment_number, type)
                except Exception as e:
                    logger.error(f"Error saving image asset: {e}")

                print(f"I", end="", flush=True)
        else:
            ## No ZMQ message available, check for events
            print(f".", end="", flush=True)

        worked = False

        ## Update the image if we are rendering during speaking
        if not audio_playback_complete_speech:
            update_image(1)
            worked = True

        # No message available, check for events
        if pygame.event.peek(AUDIO_END_EVENT_SPEECH):
            for event in pygame.event.get(AUDIO_END_EVENT_SPEECH):
                if event.type == AUDIO_END_EVENT_SPEECH:
                    print(f"X", end="", flush=True)
                    audio_playback_complete_speech = True
                else:
                    logger.error(f"Unknown event on get event: {event}")
            worked = True
        # Send status of last playback time for the audio and image assets
        status = {}
        status["timestamp"] = time.time()
        status["audio_segment_number"] = audio_segment_number
        status["image_segment_number"] = image_segment_number
        status["audio_playback_complete_speech"] = audio_playback_complete_speech
        status["last_sent_segments"] = last_sent_segments
        status["last_music_change"] = last_music_change
        status["audio_buffer_size"] = audio_buffer.qsize()
        status["image_buffer_size"] = image_buffer.qsize()
        status["music_buffer_size"] = music_buffer.qsize()
        status["audio_channel_speech_busy"] = audio_channel_speech.get_busy()
        status["audio_channel_music_busy"] = audio_channel_music.get_busy()
        status["audio_channel_music_volume"] = audio_channel_music.get_volume()
        status["audio_channel_speech_volume"] = audio_channel_speech.get_volume()
        status["audio_channel_music_switching"] = bg_music.switching
        status["audio_channel_music_complete"] = bg_music.complete
        status["audio_channel_music_running"] = bg_music.running
        status["audio_latency_delta"] = audio_latency_delta
        status["image_latency_delta"] = image_latency_delta
        status["eos"] = end_of_stream

        # calculate the duration field total for all the queued audio packets
        # don't remove them from the queue, just get the duration field and add it to the total
        total_duration = 0.0
        for audio_message, audio_asset in audio_buffer.queue:
            total_duration += audio_message["duration"]
        status["audio_buffer_duration"] = total_duration
        # send status to zmq output
        sent_delta = time.time() - last_sent_segments
        status["sent_delta"] = sent_delta
        if sent_delta < args.startup_delay:
            # check if we have 0 seconds of buffer, if so then fake it as 60 seconds and send it
            if status["audio_buffer_duration"] == 0.0:
                status["audio_buffer_duration"] = 60.0

        if time.time() - stats_last_sent_ts > args.stats_interval or stats_last_sent_duration != status["audio_buffer_duration"]:
            logger.info(f"Sending status: {status}")
            sender.send_json(status)
            stats_last_sent_ts = time.time()
            stats_last_sent_duration = status["audio_buffer_duration"]
        # check audio_buffer_duration, if 0 and image_buffer is empty, then resend the image with the last text
        if status["audio_buffer_duration"] == 0.0 and image_buffer.empty() and audio_buffer.empty():
            if time.time() - last_generated_image_time > args.slideshow_interval:
                # get the last text and image
                if last_text_asset:
                    ## get an image randomly from past_images_all_time_queue and use it as the base image
                    #past_image = random.choice(past_images_all_time_queue)

                    # randomly pick three members of the text_history list
                    random_message = random.choice(text_history)
                    random_message2 = random.choice(text_history)
                    random_message3 = random.choice(text_history)

                    # Generate a new image from previous text
                    new_image = generate_sd_webui(random_message + " " + random_message2 + " " + random_message3)
                    if new_image is not None:
                        last_image_asset = new_image
                    else:
                        #print(f"Error generating image from text: {random_message} {random_message2} {random_message3}")
                        #past_image = random.choice(past_images_all_time_queue)
                        new_image = last_image_asset

                    image_np = process_new_image(new_image, last_text_asset, args, True, "Groovy Life AI")

                    # send image directly to NDI
                    render(image_np, 1)
                    last_generated_image_time = time.time()
        ## get an audio sample and header, get the text field from it, then get an image and header and
        ## burn in the text from the audio header to the image and render it while playing the audio
        if args.nobuffer and args.norender and not audio_buffer.empty() and audio_playback_complete_speech:
            audio_message, audio_asset = audio_buffer.get()
            text = audio_message["text"]
            duration = audio_message["duration"]

            optimized_prompt = text
            if 'optimized_text' in audio_message:
                optimized_prompt = audio_message["optimized_text"]

            audio_playback_complete_speech = False
            playback(None, audio_asset, duration)
            last_sent_segments = time.time()
            audio_segment_number = audio_message["segment_number"]
            logger.info(f"Sent audio segment #{audio_message['segment_number']} at timestamp {audio_message['timestamp']}")
            worked = True
        elif not audio_buffer.empty() and not image_buffer.empty() and audio_playback_complete_speech:
            audio_message, audio_asset = audio_buffer.get()
            image_message, image_asset = image_buffer.get()

            image_segment_number = image_message["segment_number"]
            audio_segment_number = audio_message["segment_number"]
            last_sent_segments = time.time()
            duration = audio_message["duration"]

            if "eos" in audio_message and audio_message["eos"]:
                end_of_stream = True
                bg_music.unpause()
            else:
                end_of_stream = False
                bg_music.pause()

            text = audio_message["text"]
            optimized_prompt = text
            if 'optimized_text' in audio_message:
                optimized_prompt = audio_message["optimized_text"]

            if audio_message['timestamp'] < image_message['timestamp']:
                logger.debug(f"Audio segment #{audio_message['segment_number']} is older than image segment #{image_message['segment_number']}.")
            if audio_message['timestamp'] > image_message['timestamp']:
                logger.debug(f"Audio segment #{audio_message['segment_number']} is newer than image segment #{image_message['segment_number']}.")

            unique_image = True
            if "throttle" in image_message:
                if image_message["throttle"] == "true":
                    unique_image = False

            last_image_asset = image_asset.copy()
            last_text_asset = text

            # store text in our history
            text_history.append(text)

            banner_msg = f"{audio_message['username']} asked {audio_message['message'][:300]}"
            if 'eos' in audio_message and audio_message['eos']:
                banner_msg = f"{audio_message['message']}"

            if args.burn_prompt:
                image_np = process_new_image(
                    image_asset, optimized_prompt, args, unique_image, banner_msg)
            else:
                image_np = process_new_image(image_asset, text, args, unique_image, banner_msg)

            # Play audio and display image
            try:
                audio_playback_complete_speech = False
                playback(image_np, audio_asset, duration)
            except Exception as e:
                logger.error(f"Error playing back audio and displaying image: {e}")
            worked = True

            audio_timestamp = audio_message["timestamp"]
            image_timestamp = image_message["timestamp"]
            audio_latency_delta = 0
            image_latency_delta = 0
            if audio_timestamp != 0:
                audio_latency_delta = int(round(time.time() * 1000)) - int(audio_timestamp)
            if image_timestamp != 0:
                image_latency_delta = int(round(time.time() * 1000)) - int(image_timestamp)

            logger.info(f"Sent audio segment #{audio_message['segment_number']} at timestamp {audio_message['timestamp']} with latency delta {audio_latency_delta} ms.")
            logger.info(f"Sent image segment #{image_message['segment_number']} at timestamp {image_message['timestamp']} with latency delta {image_latency_delta} ms.")

            if end_of_stream and image_message['ainame'] == "passthrough":
                # sleep so image displays for a bit
                time.sleep(5)
        else:
            # check last sent segments and if it's been more than 15 seconds, reuse the last image for buffered audio
            if time.time() - last_sent_segments > 15 and last_image_asset is not None:
                # confirm image_segment_number and audio_segment_number are both matching, else we need to see if audio has buffered
                # samples to send and use the last image if there are no more image buffers
                if image_segment_number != audio_segment_number and audio_playback_complete_speech:
                    logger.debug(f"Image segment number {image_segment_number} does not match audio segment number {audio_segment_number}.")
                    if image_buffer.empty():
                        if not audio_buffer.empty():
                            logger.info(f"Warning: A/V Alignment offset, audio buffer is not empty, using last image and audio.")
                            audio_message, audio_asset = audio_buffer.get()
                            text = audio_message["text"]
                            optimized_prompt = text
                            duration = audio_message["duration"]
                            new_image = False
                            if 'optimized_text' in audio_message:
                                optimized_prompt = audio_message["optimized_text"]

                            banner_msg = f"{audio_message['username']} asked {audio_message['message'][:300]}"
                            if 'eos' in audio_message and audio_message['eos']:
                                banner_msg = f"{audio_message['message']}"

                            image_np = process_new_image(
                                last_image_asset, optimized_prompt, args, new_image, banner_msg)

                            audio_playback_complete_speech = False
                            playback(image_np, audio_asset, duration)
                            last_sent_segments = time.time()
                            audio_segment_number = audio_message["segment_number"]
                            logger.info(f"Sent audio segment #{audio_message['segment_number']} at timestamp {audio_message['timestamp']}")
                            worked = True

                            timestamp = audio_message["timestamp"]
                            latency_delta = 0
                            if timestamp != 0:
                                latency_delta = int(round(time.time() * 1000)) - int(timestamp)
                            logger.info(
                                f"Sent audio segment #{audio_message['segment_number']} at timestamp {audio_message['timestamp']} with latency delta {latency_delta} ms.")
            else:
                # check if we are in eos condition, if so send the last image seen with a special text burnin
                if end_of_stream and time.time() - last_sent_break > 10:  # send a break every 10 seconds
                    # check if any images are in the queue still
                    if not image_buffer.empty():
                        # get one image from the image buffer
                        image_message, image_asset = image_buffer.get()
                        image_segment_number = image_message["segment_number"]
                        last_image_asset = image_asset.copy()
                        logger.info(f"End of stream, sending last image with special text.")

                        # burn in special text
                        image_np = process_new_image(last_image_asset, "GroovyLife.AI", args, False, "Type !personalities or !message <personality> <question>")
                        playback(image_np, None, 0.0)
                        last_sent_break = time.time()
                        worked = True
                        logger.info(f"Sent last image segment #{image_segment_number} at timestamp {timestamp}")
        if not music_buffer.empty():
            if not args.music:
                # music disabled: drain the buffer and discard the tracks
                music_message, music = music_buffer.get()
                while not music_buffer.empty():
                    music_message, music = music_buffer.get()
                worked = True
                continue
            if not music_buffer.empty() and (last_music_change == 0 or time.time() - last_music_change > args.music_interval):
                music_message, music = music_buffer.get()
                logger.info(f"Loading Music: {music_message['mediaid']} {music_message['timestamp']} {music_message['segment_number']} {music_message['message'][:20]}")
                if last_music_change > 0:
                    logger.info(f"Last music change was {time.time() - last_music_change} seconds ago.")
                    bg_music.change_track(music)
                else:
                    # Load the initial music track
                    bg_music.change_track(music)
                last_music_change = time.time()
                worked = True
            else:
                logger.info(f"Skipping music because it's too soon since the last music change {time.time() - last_music_change}.")
        ## avoid busy loop
        if not worked:
            time.sleep(0.1)

    ndi.send_destroy(ndi_send)
    ndi.destroy()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_port", type=int, required=False, default=6003, help="Port for receiving image as PIL numpy arrays")
    parser.add_argument("--input_host", type=str, required=False, default="127.0.0.1", help="Host for receiving image as PIL numpy arrays")
    parser.add_argument("--output_port", type=int, required=False, default=6004, help="Port for sending status of last image and audio segments")
    parser.add_argument("--output_host", type=str, required=False, default="127.0.0.1", help="Host for sending status of last image and audio segments")
    parser.add_argument("-ll", "--loglevel", type=str, default="info", help="Logging level: debug, info...")
    parser.add_argument("-f", "--freq", type=int, default=22050, help="Sampling frequency for audio playback")
    parser.add_argument("--burn_prompt", action="store_true", default=False, help="Burn in the prompt that created the image")
    parser.add_argument("--width", type=int, default=1920, help="Width of the output image")
    parser.add_argument("--height", type=int, default=1080, help="Height of the output image")
    parser.add_argument("--music_volume", type=float, default=0.50, help="Volume for music audio playback, default is 0.50")
    parser.add_argument("--speech_volume", type=float, default=0.75, help="Volume for speech audio playback")
    parser.add_argument("--music_interval", type=float, default=60, help="Interval between music changes")
    parser.add_argument("--music", action="store_true", default=False, help="Enable music")
    parser.add_argument("--save", action="store_true", default=False, help="Save assets to disk")
    parser.add_argument("--norender", action="store_true", default=False, help="Disable rendering of images")
    parser.add_argument("--nobuffer", action="store_true", default=False, help="Disable buffering of images")
    parser.add_argument("--title", type=str, default="Groovy Life AI", help="Title for the window")
    parser.add_argument("--buffer_size", type=int, default=32768, help="Size of the buffer for images and audio")
    parser.add_argument("--show_ascii_art", action="store_true", default=False, help="Show images as ascii art")
    parser.add_argument("--startup_delay", type=float, default=30.0, help="Delay before sending status messages")
    parser.add_argument("--stats_interval", type=float, default=10.0, help="Interval between sending status messages")
    parser.add_argument("--sdl_audiodriver", type=str, default="GroovyLifeAI", help="SDL Audio Driver, default is GroovyLifeAI")
    parser.add_argument("--ndi_display", action="store_true", default=False, help="Send to NDI output")
    parser.add_argument("--ndi_audio", action="store_true", default=False, help="Send audio to NDI output")
    parser.add_argument("--sdwebui_image_model", type=str, default="sd_xl_turbo", help="Local SD WebUI API Image model to use, default sd_xl_turbo")
    parser.add_argument("--negative_prompt", type=str, default="Disfigured, cartoon, blurry, nsfw, naked, porn, violence, gore, racism, black face", help="Negative prompt for the model")
    parser.add_argument("--slideshow_interval", type=float, default=120.0, help="Interval between images in the slideshow")
    args = parser.parse_args()

    LOGLEVEL = logging.INFO
    if args.loglevel == "info":
        LOGLEVEL = logging.INFO
    elif args.loglevel == "debug":
        LOGLEVEL = logging.DEBUG
    elif args.loglevel == "warning":
        LOGLEVEL = logging.WARNING
    else:
        LOGLEVEL = logging.INFO

    cv_display = True
    ndi_display = args.ndi_display
    if ndi_display:
        cv_display = False

    #os.environ['SDL_AUDIODRIVER'] = args.sdl_audiodriver

    log_id = time.strftime("%Y%m%d-%H%M%S")
    logging.basicConfig(filename=f"logs/lifeAIplayer-{log_id}.log", level=LOGLEVEL)
    logger = logging.getLogger('lifeAIplayer')

    ch = logging.StreamHandler()
    ch.setLevel(LOGLEVEL)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    logger.info("connected to ZMQ in: %s:%d" % (args.input_host, args.input_port))
    socket.connect(f"tcp://{args.input_host}:{args.input_port}")
    socket.setsockopt_string(zmq.SUBSCRIBE, "")

    sender = context.socket(zmq.PUB)
    logger.info("connected to ZMQ out: %s:%d" % (args.output_host, args.output_port))
    sender.bind(f"tcp://{args.output_host}:{args.output_port}")

    pygame.init()
    pygame.mixer.init(frequency=args.freq, size=-16, channels=2, buffer=args.buffer_size, devicename=args.sdl_audiodriver)
    AUDIO_END_EVENT_MUSIC = pygame.USEREVENT + 1
    AUDIO_END_EVENT_SPEECH = pygame.USEREVENT + 2

    audio_channel_speech = pygame.mixer.Channel(1)
    audio_channel_speech.set_endevent(AUDIO_END_EVENT_SPEECH)
    audio_channel_speech.set_volume(args.speech_volume)