diff --git a/api/app.js b/api/app.js index e0237ac..c161e7e 100644 --- a/api/app.js +++ b/api/app.js @@ -1,6 +1,7 @@ const express = require("express"); const bodyParser = require("body-parser"); const path = require("path"); +const cors = require("cors"); // Require the CORS package const fetchResults = require("./fetchresults"); const youtube = require("./youtube"); @@ -10,6 +11,7 @@ const app = express(); const PORT = process.env.PORT || 3000; app.use(bodyParser.json()); +app.use(cors()); // Use the CORS middleware app.use("/static", express.static(path.join(__dirname, "static"))); @@ -22,15 +24,14 @@ app.post("/process_video", async (req, res) => { try { const transcripts = await youtube.getVideoCaptions(videoId); const shorts = await fetchResults.extractShorts(transcripts.transcript); - console.log(`shorts are \n ${JSON.stringify(shorts)}`); return res.json({ success: true, shorts }); } catch (error) { - console.log("Catched error in app.js"); - console.error("An Error occured -> " + error); + console.log("Caught error in app.js"); + console.error("An Error occurred -> " + error); return res.json({ success: false, error: error.toString() }); } }); app.listen(PORT, () => { - console.log("Server is running on port 3000"); + console.log(`Server is running on port ${PORT}`); }); diff --git a/api/fetchresults.js b/api/fetchresults.js index 7e68c09..4dc7b59 100644 --- a/api/fetchresults.js +++ b/api/fetchresults.js @@ -17,11 +17,20 @@ const analyzeCaptions = async (text) => { { role: "system", content: - "You are an expert in analyzing video transcripts to identify coherent and engaging parts suitable for creating YouTube shorts. Evaluate the provided text chunks based on their clarity, relevance, and ability to stand alone as engaging content without needing external context. Identify the sections that can be turned into stand-alone YouTube shorts while ensuring they are clear, engaging, and not abruptly starting or ending. 
Make sure to remember all the data that is being passed and give back results based on the total data sent to you. If any error occurs, mention what the error is in short. If rate limit occurs, notify me of that too. ", + "You are an expert in analyzing video transcripts to identify coherent and engaging parts suitable for creating YouTube shorts. Evaluate the provided text chunks based on their clarity, relevance, and ability to stand alone as engaging content without needing external context. Identify the sections that can be turned into stand-alone YouTube shorts while ensuring they are clear, engaging, and not abruptly starting or ending. Make sure to remember all the data that is being passed and give back results based on the total data sent to you. If any error occurs, mention what the error is. If you are unable to process the given video transcript at the moment, give a detailed message explaining why ", }, { role: "user", - content: `From the given video transcript, identify the chunks that can best be transformed into compelling YouTube shorts and extract only 3 high quality shorts from this. Here's the text: ${text} Now extract shorts in the following JSON format: { [ { 'start_time:': float (in seconds), 'end_time': float (in seconds), 'title': string }, ... ] } The start and end timings are provided in minutes. + content: `From the given video transcript, identify the chunks that can best be transformed into compelling YouTube shorts and extract only 3 high quality shorts from this. Here's the text: ${text} Now extract shorts strictly in the following JSON format: { + "data": [ + { + "start_time": start_time, + "end_time": end_time, + "title": title (in string) + }, + // rest of objects + ] + } The start and end timings are provided in minutes. However, using the provided timing, convert that necessarily into seconds when returning output. For example, 2:28 is 2 minutes and 28 seconds, which is 148 seconds so return 148 instead of 2.28. 
One more necessary condition should be that the extracted short time should lie between 15-20 seconds. The difference between start_time and end_time should necessarily lie between 12 and 23 seconds. The content of the video lies in provided captions, whereas the corresponding timings lie in the given start_time `, @@ -41,13 +50,13 @@ const analyzeCaptions = async (text) => { Authorization: `Bearer ${process.env.OPENAI_API_KEY}`, "Content-Type": "application/json", }, - }, + } ); if (response.data.choices && response.data.choices[0]) { console.log( "response.data.choices[0].message.content is", - JSON.parse(response.data.choices[0].message.content), + JSON.parse(response.data.choices[0].message.content) ); return JSON.parse(response.data.choices[0].message.content); } else { diff --git a/api/static/script.js b/api/static/script.js index 7c784b4..b72c287 100644 --- a/api/static/script.js +++ b/api/static/script.js @@ -11,10 +11,11 @@ document.addEventListener("DOMContentLoaded", function () { }) .then((response) => response.json()) .then((data) => { + console.log(`Type of data is ${typeof data}`); + console.log(data); if (data.success) { console.log("Success"); - console.log(`data is ${JSON.stringify(data)}`); - embedVideos(data.shorts, url); + embedVideos(data.shorts[0], url); } else { console.log("An Error occurred in script.js in fetching shorts"); } @@ -26,22 +27,31 @@ document.addEventListener("DOMContentLoaded", function () { }); }); -function embedVideos(shorts, videoId) { +function embedVideos(data, videoId) { console.log("Going Good in embedVideos"); const container = document.querySelector(".videos-container"); container.innerHTML = ""; - shorts[0].data.forEach((short, index) => { - const { start_time, end_time } = short; - console.log("Start :" + start_time + " End: " + end_time); + + console.log( + `Shorts received in embedVideos is ${JSON.stringify( + data.data + )} \n data length is ${data.data.length}` + ); + for (let i = 0; i < data.data.length; i++) { + 
const { start_time, end_time, title } = data.data[i]; + console.log( + "Start: " + start_time + " End: " + end_time + " Title: " + title + ); const embedCode = ``; + )}&end=${Math.round(end_time)}&autoplay=0&mute=1" + width="260" + height="140" + frameborder="0" + allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" + allowfullscreen> + `; container.innerHTML += embedCode; - }); -} \ No newline at end of file + } +} diff --git a/package-lock.json b/package-lock.json index 010dd5d..afdaf77 100644 --- a/package-lock.json +++ b/package-lock.json @@ -11,6 +11,7 @@ "dependencies": { "axios": "^1.5.1", "body-parser": "^1.20.2", + "cors": "^2.8.5", "dotenv": "^16.3.1", "express": "^4.18.2" } @@ -133,6 +134,18 @@ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" }, + "node_modules/cors": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz", + "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", + "dependencies": { + "object-assign": "^4", + "vary": "^1" + }, + "engines": { + "node": ">= 0.10" + } + }, "node_modules/debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", @@ -501,6 +514,14 @@ "node": ">= 0.6" } }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/object-inspect": { "version": "1.12.3", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", diff --git a/package.json b/package.json index e6f7323..05db31e 100644 --- a/package.json +++ b/package.json @@ -15,6 +15,7 @@ 
"dependencies": { "axios": "^1.5.1", "body-parser": "^1.20.2", + "cors": "^2.8.5", "dotenv": "^16.3.1", "express": "^4.18.2" }