
ALL JAVASCRIPT AND SHORTCODE INFO – QUICK ACCESS

JavaScript To Transfer Custom Variables From Storyline Into WordPress

let player = GetPlayer();
player.SetVar("SL_ID", document.title);

// List each Storyline variable you want to send to WordPress.
let fields = {
    STORYLINE_VARIABLE_NAME: '',
};

let data = {};
handleForm(data, fields, player);

function handleForm(data, fields, player) {

    // Copy the current value of each listed Storyline variable into the data object.
    for (let [key, val] of Object.entries(fields)) {
        let value = player.GetVar(key);
        if (value) {
            data[key] = value;
        }
    }

    data.action = 'save_user_data_json';
    data.title = document.title;

    // Derive the Storyline content name from the URL path.
    var pathArray = window.location.pathname.split('/');
    data.sl_name = pathArray[3];

    // Post the collected values to WordPress via admin-ajax.
    let url = window.location.origin + '/wp-admin/admin-ajax.php';
    fetch(url, {
        method: 'POST',
        credentials: 'same-origin',
        headers: {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Cache-Control': 'no-cache',
        },
        body: new URLSearchParams(data)
    }).then(response => response.json())
        .then(response => console.log(response.message))
        .catch(err => console.log(err));
}
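To transfer more than one custom variable, list each Storyline variable name as a separate key in the fields object at the top of the script. A minimal sketch, using made-up names (Score, LearnerName and QuizAttempts are placeholders for your own Storyline variables):

let fields = {
    Score: '',          // Replace these keys with the exact names
    LearnerName: '',    // of the Storyline variables you want to
    QuizAttempts: '',   // transfer into WordPress.
};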

JavaScript To Transfer Stored User Variables From WordPress Into Storyline

// Read the current user's identifier from the parent WordPress page.
var sluser = window.parent.userCode;

// Load this user's stored results file from WordPress.
fetch('/wp-content/uploads/jsons/results_' + sluser + '.json').then(response => {
    return response.json();
}).then(data => {

    var player = GetPlayer();

    // SL##_VARIABLENAME is a placeholder for the stored field name; STORYLINE_VARIABLE_NAME is your Storyline variable.
    player.SetVar("STORYLINE_VARIABLE_NAME", data.SL##_VARIABLENAME);

}).catch(err => {
    console.log("No Data Available.");
});
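To restore several stored values at once, add one SetVar call per field inside the same .then(data => { ... }) block shown above. A short sketch, assuming hypothetical stored fields named Score and LearnerName that map onto Storyline variables of the same names:

var player = GetPlayer();

// Example mappings only; use the field names stored in your own results JSON
// and the matching variable names from your Storyline project.
player.SetVar("Score", data.Score);
player.SetVar("LearnerName", data.LearnerName);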

Shortcode For Displaying User Variables On Any WordPress Post/Page

Make sure to remove the spaces before and after the opening and closing square brackets:

[ storyline_user_data options="VARIABLE-NAME" ]

Optional: Add a user="USERNAME" attribute to the shortcode if you wish to display the stored variable for a particular user.

E.g.

[ storyline_user_data user="johnsmith" options="VARIABLE-NAME" ]

Shortcode For Creating A Leaderboard Using Any Stored Numeric Variable

[ sl_leaderboard variable="VARIABLE-NAME" ]
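For example, to build a leaderboard from a hypothetical stored numeric variable called QuizScore (again, remove the spaces inside the square brackets before using it):

[ sl_leaderboard variable="QuizScore" ]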

JavaScript To Send And Receive A Single Question Response From The Create Chat Completion API (ChatGPT)

var player = GetPlayer();
var prompt_text = player.GetVar("StorylineTextVariable");

var messages = []

var system_message = { role: "system", content: "You are a helpful and knowledgeable AI chatbot." };
var user_prompt00 = {role: "user", content: "Please answer the following question: " + prompt_text };

messages.push(system_message);
messages.push(user_prompt00);

 var sendData = {
    'nonce': parent.storylineMagicNonce,
    'value': JSON.stringify( messages ),
    'api': 'chatCompletion',
    'jsonresponse': 'false',
//OPTIONAL Include line below to use eLM Toolkit's content document functionality
//  'contextfile': 'ABC123',
};

sendData = JSON.stringify( sendData );

const myHeaders = new Headers();

myHeaders.append("Content-Type", "application/json");
myHeaders.append("X-WP-Nonce", parent.storylineMagicRestNonce);

async function openai_req() {

  fetch(parent.storylineMagicEndpoint, {
      method:'POST',
      headers: myHeaders,
      body: sendData
  })

  .then(res => res.json())
  .then(data => {
    if ( Object.prototype.hasOwnProperty.call(data.data, 'data') ) {
        var gpt_content = data.data.data.choices[0].message.content;
        gpt_content = gpt_content.trim();
        player.SetVar("StorylineAnswerTextVariable",gpt_content);
    } else {
        console.error('Error fetching data:', data);
    }

  })
  .catch(error => {
   console.error('Error fetching data:', error);
   })
};

openai_req();

Note: Context Document prompting requires eLearning Magic Toolkit version 2.3 or later. Including a context file in your prompt is completely optional; simply remove the 'contextfile' line from the sendData block entirely if it is not needed. To use it, uncomment the line and supply your document reference, as in the sketch below.
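A minimal sketch of the sendData block with the context document option enabled (the ID 'ABC123' is the same placeholder used in the commented-out line above; substitute your own document reference):

var sendData = {
    'nonce': parent.storylineMagicNonce,
    'value': JSON.stringify( messages ),
    'api': 'chatCompletion',
    'jsonresponse': 'false',
    'contextfile': 'ABC123', // Placeholder document ID; replace with your own
};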

JavaScript To Continue Conversation With The Create Chat Completion API (ChatGPT) Using Previous Messages Thread

Store and receive each typed user request and ChatGPT API response as a variable in your SL360 project, and load them back into the next prompt request to the API by adding additional User and Assistant messages like so:

var player = GetPlayer();
var prompt_text_01 = player.GetVar("StorylineTextVariable01");
var gpt_response_01 = player.GetVar("StorylineAnswerVariable01");
var prompt_text_02 = player.GetVar("StorylineTextVariable02");
var gpt_response_02 = player.GetVar("StorylineAnswerVariable02");
var prompt_text_03 = player.GetVar("StorylineTextVariable03");

var messages = []

var system_message = { role: "system", content: "You are a helpful and knowledgeable AI chatbot." };
var user_prompt01 = {role: "user", content: "Please answer the following question: " + prompt_text_01 };
var gpt_response01 = {role: "assistant",  content: `${gpt_response_01}`}
var user_prompt02 = {role: "user", content: `${prompt_text_02}` };
var gpt_response02 = {role: "assistant", content: `${gpt_response_02}`}
var user_prompt03 = {role: "user", content: `${prompt_text_03}` };

messages.push(system_message);
messages.push(user_prompt01);
messages.push(gpt_response01);
messages.push(user_prompt02);
messages.push(gpt_response02);
messages.push(user_prompt03);

 var sendData = {
    'nonce': parent.storylineMagicNonce,
    'value': JSON.stringify( messages ),
    'api': 'chatCompletion',
    'jsonresponse': 'false',
//OPTIONAL Include line below to use eLM Toolkit's content document functionality
//  'contextfile': 'ABC123',
};

sendData = JSON.stringify( sendData );

const myHeaders = new Headers();

myHeaders.append("Content-Type", "application/json");
myHeaders.append("X-WP-Nonce", parent.storylineMagicRestNonce);

async function openai_req() {

  fetch(parent.storylineMagicEndpoint, {
      method:'POST',
      headers: myHeaders,
      body: sendData
  })

  .then(res => res.json())
  .then(data => {
    if ( Object.prototype.hasOwnProperty.call(data.data, 'data') ) {
        var gpt_content = data.data.data.choices[0].message.content;
        gpt_content = gpt_content.trim();
        player.SetVar("StorylineAnswerVariable03",gpt_content);
    } else {
        console.error('Error fetching data:', data);
    }

  })
  .catch(error => {
   console.error('Error fetching data:', error);
   })
};

openai_req();

Generate An Image Using The Create Image API (Dall-E)

Important – As of version 2.3.2, all image generation requests must include a 'size' value within the sendData block. Refer to the OpenAI website for the sizes available for each model (and their associated costs).

Capture the user's typed request as a variable in your SL360 project and send it to the Create Image API; the URL of the generated image is then stored back into a Storyline variable, like so:

var player = GetPlayer();
const userpromptvalue = "Create a photographic image of: " + player.GetVar('StorylineTextVariable');

var sendData = {
    'nonce': parent.storylineMagicNonce,
    'value': userpromptvalue,
    'api': 'imageGeneration',
    'size': '1024x1024'
};

sendData = JSON.stringify( sendData );

const myHeaders = new Headers();

myHeaders.append("Content-Type", "application/json");
myHeaders.append("X-WP-Nonce", parent.storylineMagicRestNonce);

async function openai_req() {

  fetch(parent.storylineMagicEndpoint, {
      method:'POST',
      headers: myHeaders,
      body: sendData
  })

  .then(res => res.json())
  .then(data => {
    if ( Object.prototype.hasOwnProperty.call(data.data, 'data') && Object.prototype.hasOwnProperty.call(data.data.data, 'data') ) {
        var apiReturnedImage = data.data.data.data[0].url;
        apiReturnedImage = apiReturnedImage.trim();
        player.SetVar("StorylineVariableToStoreImgURL",apiReturnedImage);
    } else {
        console.error('Error fetching data:', data);
    }

  })
  .catch(error => {
   console.error('Error fetching data:', error);
   })
};

openai_req();

JavaScript To Generate A Natural Voice Audio Track Using ElevenLabs

var script = getVar("MYTEXT");
var messages = []

var sendData = {
    'nonce': parent.storylineMagicNonce,
    'api': 'textToSpeech',
    'modelId': 'eleven_multilingual_v2',
    'text':  script,
    'id': 'bVMeCyTHy58xNoL34h3p' //Selected from elevenLabs.io
};

sendData = JSON.stringify(sendData);
const myHeaders = new Headers();

myHeaders.append("Content-Type", "application/json");
myHeaders.append("X-WP-Nonce", parent.storylineMagicRestNonce);

async function elevenlabs_req() {

    try {
        console.log('Sending Request To ElevenLabs...');
        const response = await fetch(parent.storylineMagicEndpointElevenLabs, {
            method:'POST',
            headers: myHeaders,
            body: sendData
        });

        if (!response.ok) {
            throw new Error(`HTTP error! status: ${response.status}`);
        } else {
            console.log('ElevenLabs Response Returned.');
            const data = await response.json();
            const base64Audio = data.audio;
            const audioBytes = atob(base64Audio);
            const audioArray = new Uint8Array(audioBytes.length);
            for (let i = 0; i < audioBytes.length; i++) {
                audioArray[i] = audioBytes.charCodeAt(i);
            }
            const audioBlob = new Blob([audioArray], { type: 'audio/mpeg' });
            const audiodataUrl = URL.createObjectURL(audioBlob);
            
            // Store audio data URL for use by Storyline later
            setVar("ElevenLabsAudioURL", audiodataUrl);

            // Create a new audio object and set its source to the Object URL
            const audio = new Audio(audiodataUrl);

            // Play the audio
            console.log("Audio is playing");
            audio.play();
        }
    } catch (error) {
        console.error('An error occurred:', error);
    }
}
elevenlabs_req();
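The Object URL stored in ElevenLabsAudioURL can be reused later from another Storyline JavaScript trigger, for example to replay the generated narration. A minimal sketch, assuming the variable was populated by the snippet above:

// Retrieve the audio URL saved by the text-to-speech request and play it again.
var audioUrl = getVar("ElevenLabsAudioURL");
if (audioUrl) {
    var replay = new Audio(audioUrl);
    replay.play();
} else {
    console.log("No generated audio available yet.");
}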