<?php
// Security headers — must be sent before ANY output reaches the client,
// which is why this block sits at the very top of the file.
header('X-Content-Type-Options: nosniff');
header('X-Frame-Options: DENY');
// NOTE(review): X-XSS-Protection is deprecated and ignored by modern
// browsers; kept for legacy clients. A Content-Security-Policy header
// would be the current-day replacement.
header('X-XSS-Protection: 1; mode=block');
header('Strict-Transport-Security: max-age=31536000; includeSubDomains');
header('Referrer-Policy: strict-origin-when-cross-origin');

// Article-specific SEO variables, consumed by the <head> meta tags,
// the JSON-LD schema block, and the article header markup below.
$article_title       = 'Scraping JavaScript-Heavy Sites: Advanced Techniques';
$article_description = 'Master the challenges of extracting data from dynamic websites using modern browser automation and rendering techniques. Learn advanced JavaScript scraping methods.';
$article_keywords    = 'JavaScript scraping, dynamic website scraping, browser automation, Selenium scraping, Playwright scraping, SPA scraping';
$article_author      = 'UK Data Services Technical Team';
$canonical_url       = 'https://ukdataservices.co.uk/blog/articles/javascript-heavy-sites-scraping.php';
// ISO-8601 timestamps used verbatim in meta tags and schema markup.
$article_published   = '2025-06-01T11:00:00+00:00';
$article_modified    = '2025-06-01T16:45:00+00:00';
// NOTE(review): og:image points at an SVG, but most social crawlers only
// accept PNG/JPEG at the declared 1200x630 — confirm the asset format.
$og_image   = 'https://ukdataservices.co.uk/assets/images/icon-web-scraping-v2.svg';
$read_time  = 8; // minutes; shown in the byline and schema "timeRequired"
?>
<! DOCTYPE html >
< html lang = " en " >
< head >
< meta charset = " UTF-8 " >
< meta name = " viewport " content = " width=device-width, initial-scale=1.0 " >
< title >< ? php echo htmlspecialchars ( $article_title ); ?> | UK Data Services Blog</title>
< meta name = " description " content = " <?php echo htmlspecialchars( $article_description ); ?> " >
< meta name = " keywords " content = " <?php echo htmlspecialchars( $article_keywords ); ?> " >
< meta name = " author " content = " <?php echo htmlspecialchars( $article_author ); ?> " >
< meta name = " robots " content = " index, follow " >
< link rel = " canonical " href = " <?php echo htmlspecialchars( $canonical_url ); ?> " >
<!-- Article - specific meta tags -->
< meta name = " article:published_time " content = " <?php echo $article_published ; ?> " >
< meta name = " article:modified_time " content = " <?php echo $article_modified ; ?> " >
< meta name = " article:author " content = " <?php echo htmlspecialchars( $article_author ); ?> " >
< meta name = " article:section " content = " Web Scraping " >
< meta name = " article:tag " content = " JavaScript, Web Scraping, Browser Automation, SPA " >
<!-- Preload critical resources -->
< link rel = " preload " href = " ../../assets/css/main.css " as = " style " >
< link rel = " preload " href = " ../../assets/images/ukds-main-logo.png " as = " image " >
<!-- Open Graph / Social Media -->
< meta property = " og:type " content = " article " >
< meta property = " og:url " content = " <?php echo htmlspecialchars( $canonical_url ); ?> " >
< meta property = " og:title " content = " <?php echo htmlspecialchars( $article_title ); ?> " >
< meta property = " og:description " content = " <?php echo htmlspecialchars( $article_description ); ?> " >
< meta property = " og:image " content = " <?php echo htmlspecialchars( $og_image ); ?> " >
< meta property = " og:image:width " content = " 1200 " >
< meta property = " og:image:height " content = " 630 " >
<!-- Twitter Card -->
< meta name = " twitter:card " content = " summary_large_image " >
< meta name = " twitter:title " content = " <?php echo htmlspecialchars( $article_title ); ?> " >
< meta name = " twitter:description " content = " <?php echo htmlspecialchars( $article_description ); ?> " >
< meta name = " twitter:image " content = " <?php echo htmlspecialchars( $og_image ); ?> " >
<!-- Favicon and App Icons -->
< link rel = " icon " type = " image/svg+xml " href = " ../../assets/images/favicon.svg " >
< link rel = " apple-touch-icon " sizes = " 180x180 " href = " ../../assets/images/apple-touch-icon.svg " >
<!-- Fonts -->
< link rel = " preconnect " href = " https://fonts.googleapis.com " >
< link rel = " preconnect " href = " https://fonts.gstatic.com " crossorigin >
< link href = " https://fonts.googleapis.com/css2?family=Roboto+Slab:wght@300;400;500;600;700&family=Lato:wght@300;400;500;600;700&display=swap " rel = " stylesheet " >
<!-- Styles -->
< link rel = " stylesheet " href = " ../../assets/css/main.css " >
<!-- Article Schema Markup -->
< script type = " application/ld+json " >
{
" @context " : " https://schema.org " ,
" @type " : " Article " ,
" headline " : " <?php echo htmlspecialchars( $article_title ); ?> " ,
" description " : " <?php echo htmlspecialchars( $article_description ); ?> " ,
" url " : " <?php echo htmlspecialchars( $canonical_url ); ?> " ,
" datePublished " : " <?php echo $article_published ; ?> " ,
" dateModified " : " <?php echo $article_modified ; ?> " ,
" author " : {
" @type " : " Organization " ,
" name " : " <?php echo htmlspecialchars( $article_author ); ?> " ,
" url " : " https://ukdataservices.co.uk "
},
" publisher " : {
" @type " : " Organization " ,
" name " : " UK Data Services " ,
" logo " : {
" @type " : " ImageObject " ,
" url " : " https://ukdataservices.co.uk/assets/images/ukds-main-logo.png " ,
" width " : 300 ,
" height " : 100
}
},
" image " : {
" @type " : " ImageObject " ,
" url " : " <?php echo htmlspecialchars( $og_image ); ?> " ,
" width " : 1200 ,
" height " : 630
},
" mainEntityOfPage " : {
" @type " : " WebPage " ,
" @id " : " <?php echo htmlspecialchars( $canonical_url ); ?> "
},
" articleSection " : " Web Scraping " ,
" keywords " : " <?php echo htmlspecialchars( $article_keywords ); ?> " ,
" wordCount " : 2500 ,
" timeRequired " : " PT<?php echo $read_time ; ?>M " ,
" inLanguage " : " en-GB "
}
</ script >
</ head >
< body >
<!-- Skip to content link for accessibility -->
< a href = " #main-content " class = " skip-to-content " > Skip to main content </ a >
< nav class = " navbar scrolled " id = " navbar " >
< div class = " nav-container " >
< div class = " nav-logo " >
< a href = " / " >
< img src = " ../../assets/images/ukds-main-logo.png " alt = " UK Data Services " class = " logo " loading = " eager " >
</ a >
</ div >
< div class = " nav-menu " id = " nav-menu " >
< a href = " / " class = " nav-link " > Home </ a >
< a href = " /#services " class = " nav-link " > Capabilities </ a >
< a href = " /project-types.php " class = " nav-link " > Project Types </ a >
< a href = " /about.php " class = " nav-link " > About </ a >
< a href = " /blog/ " class = " nav-link active " > Blog </ a >
< a href = " /#contact " class = " nav-link " > Contact </ a >
< a href = " /quote.php " class = " nav-link cta-button " > Request Consultation </ a >
</ div >
< div class = " nav-toggle " id = " nav-toggle " >
< span class = " bar " ></ span >
< span class = " bar " ></ span >
< span class = " bar " ></ span >
</ div >
</ div >
</ nav >
<!-- Breadcrumb Navigation -->
< div class = " breadcrumb " >
< nav aria - label = " Breadcrumb " >
< ol >
< li >< a href = " / " > Home </ a ></ li >
< li >< a href = " /blog/ " > Blog </ a ></ li >
< li >< a href = " ../categories/web-scraping.php " > Web Scraping </ a ></ li >
< li aria - current = " page " >< span > JavaScript Scraping </ span ></ li >
</ ol >
</ nav >
</ div >
<!-- Article Content -->
< main id = " main-content " >
< article class = " blog-article " >
< div class = " container " >
<!-- Article Header -->
< header class = " article-header " >
< div class = " article-meta " >
< a href = " ../categories/web-scraping.php " class = " category-link " > Web Scraping </ a >
< time datetime = " <?php echo $article_published ; ?> " class = " publish-date " > 1 June 2025 </ time >
< span class = " read-time " >< ? php echo $read_time ; ?> min read</span>
</ div >
< h1 class = " article-title " >< ? php echo htmlspecialchars ( $article_title ); ?> </h1>
< p class = " article-subtitle " >< ? php echo htmlspecialchars ( $article_description ); ?> </p>
< div class = " article-author " >
< div class = " author-info " >
< strong > By < ? php echo htmlspecialchars ( $article_author ); ?> </strong>
< p > Web scraping and automation specialists </ p >
</ div >
< div class = " article-share " >
< a href = " https://twitter.com/intent/tweet?text=<?php echo urlencode( $article_title ); ?>&url=<?php echo urlencode( $canonical_url ); ?> " target = " _blank " rel = " noopener " aria - label = " Share on Twitter " > 📤 Share </ a >
</ div >
</ div >
</ header >
<!-- Table of Contents -->
< nav class = " article-toc " >
< h2 > Table of Contents </ h2 >
< ol >
< li >< a href = " #understanding-challenges " > Understanding the Challenges </ a ></ li >
< li >< a href = " #browser-automation " > Browser Automation Tools </ a ></ li >
< li >< a href = " #playwright-techniques " > Playwright Advanced Techniques </ a ></ li >
< li >< a href = " #selenium-strategies " > Selenium Optimization Strategies </ a ></ li >
< li >< a href = " #performance-optimization " > Performance Optimization </ a ></ li >
< li >< a href = " #common-patterns " > Common Patterns & Solutions </ a ></ li >
< li >< a href = " #best-practices " > Best Practices & Ethics </ a ></ li >
< li >< a href = " #conclusion " > Conclusion </ a ></ li >
</ ol >
</ nav >
<!-- Article Content -->
< div class = " article-content " >
< section id = " understanding-challenges " >
< h2 > Understanding the Challenges of JavaScript - Heavy Sites </ h2 >
< p > Modern web applications increasingly rely on JavaScript frameworks like React , Vue . js , and Angular to create dynamic , interactive experiences . While this enhances user experience , it presents significant challenges for traditional web scraping approaches that rely on static HTML parsing .</ p >
< h3 > Why Traditional Scraping Fails </ h3 >
< p > Traditional HTTP - based scraping tools see only the initial HTML document before JavaScript execution . For JavaScript - heavy sites , this means :</ p >
< ul >
< li >< strong > Empty or minimal content :</ strong > The initial HTML often contains just loading placeholders </ li >
< li >< strong > Missing dynamic elements :</ strong > Content loaded via AJAX calls isn ' t captured </ li >
< li >< strong > No user interactions :</ strong > Data that appears only after clicks , scrolls , or form submissions is inaccessible </ li >
< li >< strong > Client - side routing :</ strong > SPAs ( Single Page Applications ) handle navigation without full page reloads </ li >
</ ul >
< div class = " callout-box " >
< h3 > 💡 Key Insight </ h3 >
< p > Over 70 % of modern websites use some form of JavaScript for content loading , making browser automation essential for comprehensive data extraction .</ p >
</ div >
</ section >
< section id = " browser-automation " >
< h2 > Browser Automation Tools Overview </ h2 >
< p > Browser automation tools control real browsers programmatically , allowing you to interact with JavaScript - heavy sites as a user would . Here are the leading options :</ p >
< div class = " comparison-grid " >
< div class = " comparison-item " >
< h4 > 🎭 Playwright </ h4 >
< p >< strong > Best for :</ strong > Modern web apps , cross - browser testing , high performance </ p >
< div class = " pros-cons " >
< strong > Pros :</ strong > Fast , reliable , excellent API design , built - in waiting mechanisms
</ div >
</ div >
< div class = " comparison-item " >
< h4 > 🔧 Selenium </ h4 >
< p >< strong > Best for :</ strong > Mature ecosystems , extensive browser support , legacy compatibility </ p >
< div class = " pros-cons " >
< strong > Pros :</ strong > Mature , extensive documentation , large community support
</ div >
</ div >
< div class = " comparison-item " >
< h4 > 🚀 Puppeteer </ h4 >
< p >< strong > Best for :</ strong > Chrome - specific tasks , Node . js environments , PDF generation </ p >
< div class = " pros-cons " >
< strong > Pros :</ strong > Chrome - optimized , excellent for headless operations
</ div >
</ div >
</ div >
</ section >
< section id = " playwright-techniques " >
< h2 > Playwright Advanced Techniques </ h2 >
< p > Playwright offers the most modern approach to browser automation with excellent performance and reliability . Here ' s how to leverage its advanced features :</ p >
< h3 > Smart Waiting Strategies </ h3 >
< p > Playwright ' s auto - waiting capabilities reduce the need for manual delays :</ p >
< pre >< code >// Wait for network to be idle ( no requests for 500 ms )
await page . waitForLoadState ( 'networkidle' );
// Wait for specific element to be visible
await page . waitForSelector ( '.dynamic-content' , { state : 'visible' });
// Wait for JavaScript to finish execution
await page . waitForFunction (() => window . dataLoaded === true ); </ code ></ pre >
< h3 > Handling Dynamic Content </ h3 >
< p > For content that loads asynchronously :</ p >
< pre >< code >// Wait for API response and content update
await page . route ( '**/api/data' , route => {
// Optionally modify or monitor requests
route . continue ();
});
// Trigger action and wait for response
await page . click ( '.load-more-button' );
await page . waitForResponse ( '**/api/data' );
await page . waitForSelector ( '.new-items' ); </ code ></ pre >
< h3 > Infinite Scroll Handling </ h3 >
< p > Many modern sites use infinite scroll for content loading :</ p >
< pre >< code > async function handleInfiniteScroll ( page , maxScrolls = 10 ) {
let scrollCount = 0 ;
let previousHeight = 0 ;
while ( scrollCount < maxScrolls ) {
// Scroll to bottom
await page . evaluate (() => window . scrollTo ( 0 , document . body . scrollHeight ));
// Wait for new content to load
await page . waitForTimeout ( 2000 );
// Check if new content appeared
const currentHeight = await page . evaluate (() => document . body . scrollHeight );
if ( currentHeight === previousHeight ) break ;
previousHeight = currentHeight ;
scrollCount ++ ;
}
} </ code ></ pre >
</ section >
< section id = " selenium-strategies " >
< h2 > Selenium Optimization Strategies </ h2 >
< p > While Playwright is often preferred for new projects , Selenium remains widely used and can be highly effective with proper optimization :</ p >
< h3 > WebDriverWait Best Practices </ h3 >
< p > Explicit waits are crucial for reliable Selenium scripts :</ p >
< pre >< code > from selenium . webdriver . support . ui import WebDriverWait
from selenium . webdriver . support import expected_conditions as EC
from selenium . webdriver . common . by import By
# Wait for element to be clickable
wait = WebDriverWait ( driver , 10 )
element = wait . until ( EC . element_to_be_clickable (( By . CLASS_NAME , 'load-more' )))
# Wait for text to appear in element
wait . until ( EC . text_to_be_present_in_element (( By . ID , 'status' ), 'Loaded' ))
# Wait for all elements to load
wait . until ( lambda driver : len ( driver . find_elements ( By . CLASS_NAME , 'item' )) > 0 ) </ code ></ pre >
< h3 > Handling AJAX Requests </ h3 >
< p > Monitor network activity to determine when content is fully loaded :</ p >
< pre >< code > # Custom wait condition for AJAX completion
class ajax_complete :
def __call__ ( self , driver ) :
return driver . execute_script ( " return jQuery.active == 0 " )
# Use the custom wait condition
wait . until ( ajax_complete ()) </ code ></ pre >
</ section >
< section id = " performance-optimization " >
< h2 > Performance Optimization Techniques </ h2 >
< p > Browser automation can be resource - intensive . Here are strategies to improve performance :</ p >
< h3 > Headless Mode Optimization </ h3 >
< ul >
< li >< strong > Disable images :</ strong > Reduce bandwidth and loading time </ li >
< li >< strong > Block ads and trackers :</ strong > Speed up page loads </ li >
< li >< strong > Reduce browser features :</ strong > Disable unnecessary plugins and extensions </ li >
</ ul >
< h3 > Parallel Processing </ h3 >
< p > Scale your scraping with concurrent browser instances :</ p >
< pre >< code > import asyncio
from playwright . async_api import async_playwright
async def scrape_page ( url ) :
async with async_playwright () as p :
browser = await p . chromium . launch ()
page = await browser . new_page ()
await page . goto ( url )
# Scraping logic here
await browser . close ()
# Run multiple scraping tasks concurrently
urls = [ 'url1' , 'url2' , 'url3' ]
await asyncio . gather ( * [ scrape_page ( url ) for url in urls ]) </ code ></ pre >
< h3 > Resource Management </ h3 >
< ul >
< li >< strong > Browser pooling :</ strong > Reuse browser instances across requests </ li >
< li >< strong > Memory monitoring :</ strong > Restart browsers when memory usage gets high </ li >
< li >< strong > Connection limits :</ strong > Respect server resources with appropriate delays </ li >
</ ul >
</ section >
< section id = " common-patterns " >
< h2 > Common Patterns & Solutions </ h2 >
< p > Here are proven patterns for handling specific JavaScript scraping challenges :</ p >
< h3 > Single Page Applications ( SPAs ) </ h3 >
< p > SPAs update content without full page reloads , requiring special handling :</ p >
< ul >
< li >< strong > URL monitoring :</ strong > Watch for hash or path changes </ li >
< li >< strong > State detection :</ strong > Check for application state indicators </ li >
< li >< strong > Component waiting :</ strong > Wait for specific UI components to render </ li >
</ ul >
< h3 > API Interception </ h3 >
< p > Sometimes it ' s more efficient to intercept API calls directly :</ p >
< pre >< code >// Intercept and capture API responses
const apiData = [];
await page . route ( '**/api/**' , route => {
route . continue () . then ( response => {
response . json () . then ( data => {
apiData . push ( data );
});
});
});
// Navigate and trigger API calls
await page . goto ( url );
// The API data is now captured in apiData array</code></pre>
< h3 > Form Interactions </ h3 >
< p > Automate complex form interactions for data behind login screens :</ p >
< ul >
< li >< strong > Cookie management :</ strong > Maintain session state across requests </ li >
< li >< strong > CSRF tokens :</ strong > Handle security tokens dynamically </ li >
< li >< strong > Multi - step forms :</ strong > Navigate through wizard - style interfaces </ li >
</ ul >
</ section >
< section id = " best-practices " >
< h2 > Best Practices & Ethical Considerations </ h2 >
< p > Responsible JavaScript scraping requires careful attention to technical and ethical considerations :</ p >
< h3 > Technical Best Practices </ h3 >
< ul >
< li >< strong > Robust error handling :</ strong > Gracefully handle timeouts and failures </ li >
< li >< strong > User - agent rotation :</ strong > Vary browser fingerprints appropriately </ li >
< li >< strong > Rate limiting :</ strong > Implement delays between requests </ li >
< li >< strong > Data validation :</ strong > Verify extracted data quality </ li >
</ ul >
< h3 > Ethical Guidelines </ h3 >
< ul >
< li >< strong > Respect robots . txt :</ strong > Follow website scraping guidelines </ li >
< li >< strong > Terms of service :</ strong > Review and comply with website terms </ li >
< li >< strong > Data protection :</ strong > Handle personal data according to GDPR </ li >
< li >< strong > Server resources :</ strong > Avoid overwhelming target servers </ li >
</ ul >
< div class = " best-practice-box " >
< h3 > 🛡️ Legal Compliance </ h3 >
< p > Always ensure your JavaScript scraping activities comply with UK data protection laws . For comprehensive guidance , see our < a href = " web-scraping-compliance-uk-guide.php " > complete compliance guide </ a >.</ p >
</ div >
</ section >
< section id = " conclusion " >
< h2 > Conclusion </ h2 >
< p > Scraping JavaScript - heavy sites requires a shift from traditional HTTP - based approaches to browser automation tools . While this adds complexity , it opens up access to the vast majority of modern web applications .</ p >
< h3 > Key Takeaways </ h3 >
< ol >
< li >< strong > Choose the right tool :</ strong > Playwright for modern apps , Selenium for compatibility </ li >
< li >< strong > Master waiting strategies :</ strong > Proper synchronization is crucial </ li >
< li >< strong > Optimize performance :</ strong > Use headless mode and parallel processing </ li >
< li >< strong > Handle common patterns :</ strong > SPAs , infinite scroll , and API interception </ li >
< li >< strong > Stay compliant :</ strong > Follow legal and ethical guidelines </ li >
</ ol >
< div class = " expert-consultation-cta " >
< h3 > Need Expert JavaScript Scraping Solutions ? </ h3 >
< p > Our technical team specializes in complex JavaScript scraping projects with full compliance and optimization .</ p >
< a href = " ../../quote.php?service=javascript-scraping " class = " btn btn-primary " > Get Technical Consultation </ a >
</ div >
</ section >
</ div >
<!-- Related Articles -->
< section class = " related-articles " >
< h2 > Related Articles </ h2 >
< div class = " related-grid " >
< article class = " related-card " >
< h3 >< a href = " web-scraping-compliance-uk-guide.php " > Complete Guide to Web Scraping Compliance in the UK </ a ></ h3 >
< p > Ensure your JavaScript scraping activities remain fully compliant with UK data protection laws .</ p >
< span class = " read-time " > 12 min read </ span >
</ article >
< article class = " related-card " >
< h3 >< a href = " selenium-vs-playwright-comparison.php " > Selenium vs Playwright : Choose the Right Tool </ a ></ h3 >
< p > Comprehensive comparison of browser automation tools with performance benchmarks .</ p >
< span class = " read-time " > 12 min read </ span >
</ article >
< article class = " related-card " >
< h3 >< a href = " ../categories/web-scraping.php " > More Web Scraping Articles </ a ></ h3 >
< p > Explore our complete collection of web scraping guides and tutorials .</ p >
< span class = " read-time " > Browse category </ span >
</ article >
</ div >
</ section >
</ div >
</ article >
<!-- CTA Section -->
< section class = " cta " >
< div class = " container " >
< div class = " cta-content " >
< h2 > Need Professional JavaScript Scraping Services ? </ h2 >
< p > Our expert team handles complex JavaScript - heavy sites with advanced automation and full compliance .</ p >
< div class = " cta-buttons " >
< a href = " /quote.php " class = " btn btn-primary " > Get Free Consultation </ a >
< a href = " /#services " class = " btn btn-secondary " > Explore Scraping Services </ a >
</ div >
</ div >
</ div >
</ section >
</ main >
<!-- Footer -->
< footer class = " footer " >
< div class = " container " >
< div class = " footer-content " >
< div class = " footer-section " >
< div class = " footer-logo " >
< img src = " ../../assets/images/logo-white.svg " alt = " UK Data Services " loading = " lazy " >
</ div >
< p > Enterprise data intelligence solutions for modern British business . Transform your operations with accurate , actionable insights and regulatory - compliant data services .</ p >
</ div >
< div class = " footer-section " >
< h3 > Web Scraping Services </ h3 >
< ul >
< li >< a href = " /#services " > JavaScript Scraping </ a ></ li >
< li >< a href = " /#services " > Browser Automation </ a ></ li >
< li >< a href = " /#services " > SPA Data Extraction </ a ></ li >
< li >< a href = " /#services " > API Integration </ a ></ li >
< li >< a href = " /#services " > Custom Solutions </ a ></ li >
</ ul >
</ div >
< div class = " footer-section " >
< h3 > Resources </ h3 >
< ul >
< li >< a href = " / " > Technical Blog </ a ></ li >
< li >< a href = " /case-studies/ " > Case Studies </ a ></ li >
< li >< a href = " /about.php " > Technical Team </ a ></ li >
< li >< a href = " /project-types.php " > Project Types </ a ></ li >
< li >< a href = " /quote.php " > Get Quote </ a ></ li >
</ ul >
</ div >
< div class = " footer-section " >
< h3 > Legal & Support </ h3 >
< ul >
< li >< a href = " /privacy-policy.php " > Privacy Policy </ a ></ li >
< li >< a href = " /terms-of-service.php " > Terms of Service </ a ></ li >
< li >< a href = " /cookie-policy.php " > Cookie Policy </ a ></ li >
< li >< a href = " /gdpr-compliance.php " > GDPR Compliance </ a ></ li >
< li >< a href = " /#contact " > Technical Support </ a ></ li >
</ ul >
</ div >
</ div >
< div class = " footer-bottom " >
< p >& copy ; < ? php echo date ( 'Y' ); ?> UK Data Services. All rights reserved.</p>
< div class = " social-links " >
< a href = " https://www.linkedin.com/company/uk-data-services " aria - label = " LinkedIn " rel = " noopener " target = " _blank " >
< img src = " ../../assets/images/icon-linkedin.svg " alt = " LinkedIn " loading = " lazy " >
</ a >
< a href = " https://twitter.com/ukdataservices " aria - label = " Twitter " rel = " noopener " target = " _blank " >
< img src = " ../../assets/images/icon-twitter.svg " alt = " Twitter " loading = " lazy " >
</ a >
</ div >
</ div >
</ div >
</ footer >
<!-- Scripts -->
< script src = " ../../assets/js/main.js " ></ script >
<!-- Article - specific functionality -->
< script >
document . addEventListener ( 'DOMContentLoaded' , function () {
// Code block copy functionality
const codeBlocks = document . querySelectorAll ( 'pre code' );
codeBlocks . forEach (( block , index ) => {
const pre = block . parentElement ;
// Add click handler for copy functionality
pre . addEventListener ( 'click' , function ( e ) {
if ( e . target === this || e . target === block ) {
// Copy code to clipboard
const text = block . textContent ;
navigator . clipboard . writeText ( text ) . then (() => {
// Show temporary feedback
const originalBefore = this . style . content ;
this . setAttribute ( 'data-copied' , 'true' );
setTimeout (() => {
this . removeAttribute ( 'data-copied' );
}, 2000 );
}) . catch ( err => {
console . log ( 'Copy failed:' , err );
});
}
});
});
// Reading progress indicator
const article = document . querySelector ( '.article-content' );
const progressBar = document . createElement ( 'div' );
progressBar . className = 'reading-progress' ;
progressBar . style . cssText = `
position : fixed ;
top : 70 px ;
left : 0 ;
width : 0 % ;
height : 3 px ;
background : linear - gradient ( 90 deg , #179e83, #144784);
z - index : 999 ;
transition : width 0.3 s ease ;
` ;
document . body . appendChild ( progressBar );
function updateReadingProgress () {
const articleRect = article . getBoundingClientRect ();
const articleHeight = article . offsetHeight ;
const viewportHeight = window . innerHeight ;
const scrolled = Math . max ( 0 , - articleRect . top );
const progress = Math . min ( 100 , ( scrolled / ( articleHeight - viewportHeight )) * 100 );
progressBar . style . width = progress + '%' ;
}
window . addEventListener ( 'scroll' , updateReadingProgress );
updateReadingProgress ();
// Smooth scrolling for table of contents
const tocLinks = document . querySelectorAll ( '.article-toc a' );
tocLinks . forEach ( link => {
link . addEventListener ( 'click' , function ( e ) {
e . preventDefault ();
const targetId = this . getAttribute ( 'href' );
const targetSection = document . querySelector ( targetId );
if ( targetSection ) {
const headerOffset = 80 ;
const elementPosition = targetSection . getBoundingClientRect () . top ;
const offsetPosition = elementPosition + window . pageYOffset - headerOffset ;
window . scrollTo ({
top : offsetPosition ,
behavior : 'smooth'
});
}
});
});
});
</ script >
</ body >
</ html >