How to stream data from server to client?

I have also tried the following.
The client code:

  def handle_result(self, result):
    # Callback for call_async(...).on_result: receives the final LLM
    # response once 'getresponse' completes, and just logs it.
    print(result)

  def submitbtn_click(self, **event_args):
    """Handle the Submit click: start the server-side polling task, fire the
    LLM request asynchronously, and mirror intermediate output into the UI.
    """
    # Launch the background task that copies the streaming LLM output
    # into its task_state (see read_llm_output on the server).
    openai_bgtask = anvil.server.call('get_intermediate_response')

    # Fire the OpenAI request without blocking this handler; handle_result
    # receives the final answer via the on_result callback.
    result_hndl = call_async('getresponse', "Some prompt")
    result_hndl.on_result(self.handle_result)

    # Poll the task for intermediate output.
    # BUG FIX: get_state() takes no arguments and returns the whole state
    # dict — index into the returned dict instead of passing a list of keys.
    # NOTE(review): a sleep loop blocks the client event handler (which is
    # why the UI only updates at the end) — an Anvil Timer component is the
    # non-blocking way to poll; consider switching.
    task_running = True
    while task_running:
      state = openai_bgtask.get_state()
      self.response.text = state.get('result', '')
      sleep(0.5)
      task_running = openai_bgtask.get_state().get('status', False)

    openai_bgtask.kill()

On the server side there is the following code:

# Module-level streaming state shared between the callback handler and the
# background task.
# NOTE(review): Anvil background tasks run in a separate server process, so
# mutations made to these globals in the process serving 'getresponse' are
# presumably NOT visible inside read_llm_output — confirm; this matches the
# reported symptom that the background task never sees the tokens.
llmresult = ""
llmstate = True

class MyCustomHandler(BaseCallbackHandler):
  """LangChain callback handler that mirrors streaming LLM output into the
  module-level globals `llmresult` (accumulated text) and `llmstate`
  (True while generation is in progress).
  """

  def on_llm_new_token(self, token: str, **kwargs) -> None:
    # Append each streamed token to the shared result buffer.
    global llmresult
    llmresult += token

  def on_llm_end(self, response, **kwargs) -> None:
    # BUG FIX: the original wrote to `llm_state` (with an underscore), a
    # name that exists nowhere else — the real flag is `llmstate`, so the
    # polling loop in read_llm_output never saw generation finish.
    global llmstate
    llmstate = False


@authenticated_callable
def get_intermediate_response():
  """Launch the background task that mirrors streaming LLM output into its
  task_state, and return the task handle to the caller for polling.
  """
  return anvil.server.launch_background_task('read_llm_output')


@anvil.server.background_task
def read_llm_output():
  """Background task: periodically copy the module-level streaming state
  (`llmresult`, `llmstate`) into anvil.server.task_state so the client can
  poll it via get_state().

  NOTE(review): Anvil background tasks run in a separate server process,
  so the globals mutated by MyCustomHandler in the process handling the
  'getresponse' call are presumably not visible here — confirm; this
  matches the reported symptom. Sharing via a data table or other
  cross-process store would be needed.
  """
  global llmresult
  global llmstate

  llmstate = True
  anvil.server.task_state['status'] = llmstate

  # Publish the partial result roughly ten times a second while generating.
  while llmstate:
    anvil.server.task_state['result'] = llmresult
    sleep(0.1)

  # BUG FIX: publish the text one final time after the loop exits so tokens
  # that arrived during the last sleep interval are not lost.
  anvil.server.task_state['result'] = llmresult
  anvil.server.task_state['status'] = llmstate

When I do some debugging, I can see that the global variable llmresult is assigned the text coming from the on_llm_new_token event.
But the background task does not see this global variable.
Also

self.response.text = openai_bgtask.get_state(['result'])

is blocking and only returns when the call started by:

result_hndl = call_async('getresponse', "Some prompt")

has completed.